diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h index fe7905c33080f..c8f4e2aa8c580 100644 --- a/bolt/include/bolt/Core/MCPlusBuilder.h +++ b/bolt/include/bolt/Core/MCPlusBuilder.h @@ -538,6 +538,11 @@ class MCPlusBuilder { llvm_unreachable("not implemented"); } + virtual void createDirectBranch(MCInst &Inst, const MCSymbol *Target, + MCContext *Ctx) { + llvm_unreachable("not implemented"); + } + virtual MCPhysReg getX86R11() const { llvm_unreachable("not implemented"); } virtual unsigned getShortBranchOpcode(unsigned Opcode) const { @@ -1888,6 +1893,12 @@ class MCPlusBuilder { llvm_unreachable("not implemented"); } + /// Update operand of BTI instruction. + virtual void updateBTIVariant(MCInst &Inst, bool CallTarget, + bool JumpTarget) const { + llvm_unreachable("not implemented"); + } + /// Store \p Target absolute address to \p RegName virtual InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx, diff --git a/bolt/lib/Passes/Instrumentation.cpp b/bolt/lib/Passes/Instrumentation.cpp index 150461b020f06..10479f35d8f9d 100644 --- a/bolt/lib/Passes/Instrumentation.cpp +++ b/bolt/lib/Passes/Instrumentation.cpp @@ -305,9 +305,12 @@ void Instrumentation::instrumentIndirectTarget(BinaryBasicBlock &BB, : IndCallHandlerExitBBFunction->getSymbol(), IndCallSiteID, &*BC.Ctx); - Iter = BB.eraseInstruction(Iter); - Iter = insertInstructions(CounterInstrs, BB, Iter); - --Iter; + if (!BC.isAArch64()) { + Iter = BB.eraseInstruction(Iter); + Iter = insertInstructions(CounterInstrs, BB, Iter); + --Iter; + } else + Iter = insertInstructions(CounterInstrs, BB, Iter); } bool Instrumentation::instrumentOneTarget( diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp index f1291f676f1b5..dc7644fbabdcf 100644 --- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp +++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp @@ -48,14 +48,14 @@ static cl::opt 
NoLSEAtomics( namespace { -static void getSystemFlag(MCInst &Inst, MCPhysReg RegName) { +[[maybe_unused]] static void getSystemFlag(MCInst &Inst, MCPhysReg RegName) { Inst.setOpcode(AArch64::MRS); Inst.clear(); Inst.addOperand(MCOperand::createReg(RegName)); Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV)); } -static void setSystemFlag(MCInst &Inst, MCPhysReg RegName) { +[[maybe_unused]] static void setSystemFlag(MCInst &Inst, MCPhysReg RegName) { Inst.setOpcode(AArch64::MSR); Inst.clear(); Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV)); @@ -2114,6 +2114,14 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { convertJmpToTailCall(Inst); } + void createDirectBranch(MCInst &Inst, const MCSymbol *Target, + MCContext *Ctx) override { + Inst.setOpcode(AArch64::B); + Inst.clear(); + Inst.addOperand(MCOperand::createExpr(getTargetExprFor( + Inst, MCSymbolRefExpr::create(Target, *Ctx), *Ctx, 0))); + } + bool analyzeBranch(InstructionIterator Begin, InstructionIterator End, const MCSymbol *&TBB, const MCSymbol *&FBB, MCInst *&CondBranch, @@ -2471,21 +2479,14 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { } InstructionListType createInstrumentedIndCallHandlerExitBB() const override { - InstructionListType Insts(5); // Code sequence for instrumented indirect call handler: - // msr nzcv, x1 - // ldp x0, x1, [sp], #16 - // ldr x16, [sp], #16 - // ldp x0, x1, [sp], #16 - // br x16 - setSystemFlag(Insts[0], AArch64::X1); - createPopRegisters(Insts[1], AArch64::X0, AArch64::X1); - // Here we load address of the next function which should be called in the - // original binary to X16 register. Writing to X16 is permitted without - // needing to restore. 
- loadReg(Insts[2], AArch64::X16, AArch64::SP); - createPopRegisters(Insts[3], AArch64::X0, AArch64::X1); - createIndirectBranch(Insts[4], AArch64::X16, 0); + // ret + + InstructionListType Insts; + + Insts.emplace_back(); + createReturn(Insts.back()); + return Insts; } @@ -2561,39 +2562,59 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { MCSymbol *HandlerFuncAddr, int CallSiteID, MCContext *Ctx) override { - InstructionListType Insts; // Code sequence used to enter indirect call instrumentation helper: - // stp x0, x1, [sp, #-16]! createPushRegisters - // mov target x0 convertIndirectCallToLoad -> orr x0 target xzr + // stp x0, x1, [sp, #-16]! createPushRegisters (1) + // mov target, x0 convertIndirectCallToLoad -> orr x0 target xzr // mov x1 CallSiteID createLoadImmediate -> // movk x1, #0x0, lsl #48 // movk x1, #0x0, lsl #32 // movk x1, #0x0, lsl #16 // movk x1, #0x0 - // stp x0, x1, [sp, #-16]! - // bl *HandlerFuncAddr createIndirectCall -> + // stp x0, x30, [sp, #-16]! (2) // adr x0 *HandlerFuncAddr -> adrp + add - // blr x0 + // blr x0 (__bolt_instr_ind_call_handler_func) + // ldp x0, x30, [sp], #16 (2) + // mov x0, target ; move target address to used register + // ldp x0, x1, [sp], #16 (1) + + InstructionListType Insts; Insts.emplace_back(); - createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1); + createPushRegisters(Insts.back(), getIntArgRegister(0), + getIntArgRegister(1)); Insts.emplace_back(CallInst); - convertIndirectCallToLoad(Insts.back(), AArch64::X0); + convertIndirectCallToLoad(Insts.back(), getIntArgRegister(0)); InstructionListType LoadImm = createLoadImmediate(getIntArgRegister(1), CallSiteID); Insts.insert(Insts.end(), LoadImm.begin(), LoadImm.end()); Insts.emplace_back(); - createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1); + createPushRegisters(Insts.back(), getIntArgRegister(0), AArch64::LR); Insts.resize(Insts.size() + 2); - InstructionListType Addr = - materializeAddress(HandlerFuncAddr, Ctx, AArch64::X0); + 
InstructionListType Addr = materializeAddress( + HandlerFuncAddr, Ctx, CallInst.getOperand(0).getReg()); assert(Addr.size() == 2 && "Invalid Addr size"); std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size()); + + Insts.emplace_back(); + createIndirectCallInst(Insts.back(), false, + CallInst.getOperand(0).getReg()); + + Insts.emplace_back(); + createPopRegisters(Insts.back(), getIntArgRegister(0), AArch64::LR); + + // move x0 to indirect call register Insts.emplace_back(); - createIndirectCallInst(Insts.back(), isTailCall(CallInst), AArch64::X0); + Insts.back().setOpcode(AArch64::ORRXrs); + Insts.back().insert(Insts.back().begin(), + MCOperand::createReg(CallInst.getOperand(0).getReg())); + Insts.back().insert(Insts.back().begin() + 1, + MCOperand::createReg(AArch64::XZR)); + Insts.back().insert(Insts.back().begin() + 2, + MCOperand::createReg(getIntArgRegister(0))); + Insts.back().insert(Insts.back().begin() + 3, MCOperand::createImm(0)); - // Carry over metadata including tail call marker if present. - stripAnnotations(Insts.back()); - moveAnnotations(std::move(CallInst), Insts.back()); + Insts.emplace_back(); + createPopRegisters(Insts.back(), getIntArgRegister(0), + getIntArgRegister(1)); return Insts; } @@ -2602,12 +2623,10 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline, const MCSymbol *IndCallHandler, MCContext *Ctx) override { - // Code sequence used to check whether InstrTampoline was initialized + // Code sequence used to check whether InstrTrampoline was initialized // and call it if so, returns via IndCallHandler - // stp x0, x1, [sp, #-16]! - // mrs x1, nzcv - // adr x0, InstrTrampoline -> adrp + add - // ldr x0, [x0] + // adrp x0, InstrTrampoline + // ldr x0, [x0, #lo12:InstrTrampoline] // subs x0, x0, #0x0 // b.eq IndCallHandler // str x30, [sp, #-16]! 
@@ -2615,30 +2634,42 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { // ldr x30, [sp], #16 // b IndCallHandler InstructionListType Insts; + + // load handler address + MCInst InstAdrp; + InstAdrp.setOpcode(AArch64::ADRP); + InstAdrp.addOperand(MCOperand::createReg(getIntArgRegister(0))); + InstAdrp.addOperand(MCOperand::createImm(0)); + setOperandToSymbolRef(InstAdrp, /* OpNum */ 1, InstrTrampoline, + /* Addend */ 0, Ctx, ELF::R_AARCH64_ADR_GOT_PAGE); + Insts.emplace_back(InstAdrp); + + MCInst InstLoad; + InstLoad.setOpcode(AArch64::LDRXui); + InstLoad.addOperand(MCOperand::createReg(getIntArgRegister(0))); + InstLoad.addOperand(MCOperand::createReg(getIntArgRegister(0))); + InstLoad.addOperand(MCOperand::createImm(0)); + setOperandToSymbolRef(InstLoad, /* OpNum */ 2, InstrTrampoline, + /* Addend */ 0, Ctx, ELF::R_AARCH64_LD64_GOT_LO12_NC); + Insts.emplace_back(InstLoad); + + InstructionListType CmpJmp = + createCmpJE(getIntArgRegister(0), 0, IndCallHandler, Ctx); + Insts.insert(Insts.end(), CmpJmp.begin(), CmpJmp.end()); + Insts.emplace_back(); - createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1); - Insts.emplace_back(); - getSystemFlag(Insts.back(), getIntArgRegister(1)); - Insts.emplace_back(); - Insts.emplace_back(); - InstructionListType Addr = - materializeAddress(InstrTrampoline, Ctx, AArch64::X0); - std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size()); - assert(Addr.size() == 2 && "Invalid Addr size"); - Insts.emplace_back(); - loadReg(Insts.back(), AArch64::X0, AArch64::X0); - InstructionListType cmpJmp = - createCmpJE(AArch64::X0, 0, IndCallHandler, Ctx); - Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end()); - Insts.emplace_back(); - storeReg(Insts.back(), AArch64::LR, AArch64::SP); + storeReg(Insts.back(), AArch64::LR, getSpRegister(/*Size*/ 8)); + Insts.emplace_back(); Insts.back().setOpcode(AArch64::BLR); - Insts.back().addOperand(MCOperand::createReg(AArch64::X0)); + 
Insts.back().addOperand(MCOperand::createReg(getIntArgRegister(0))); + Insts.emplace_back(); - loadReg(Insts.back(), AArch64::LR, AArch64::SP); + loadReg(Insts.back(), AArch64::LR, getSpRegister(/*Size*/ 8)); + Insts.emplace_back(); - createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true); + createDirectBranch(Insts.back(), IndCallHandler, Ctx); + return Insts; } @@ -2800,6 +2831,14 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { Inst.getOpcode() == AArch64::PACIBSP; } + void updateBTIVariant(MCInst &Inst, bool CallTarget, + bool JumpTarget) const override { + assert(Inst.getOpcode() == AArch64::HINT && "Not a BTI instruction."); + unsigned HintNum = getBTIHintNum(CallTarget, JumpTarget); + Inst.clear(); + Inst.addOperand(MCOperand::createImm(HintNum)); + } + InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx, MCPhysReg RegName, int64_t Addend = 0) const override { diff --git a/bolt/runtime/instr.cpp b/bolt/runtime/instr.cpp index f586db2b0f9ba..634ade6bdd407 100644 --- a/bolt/runtime/instr.cpp +++ b/bolt/runtime/instr.cpp @@ -1691,9 +1691,12 @@ instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) { extern "C" __attribute((naked)) void __bolt_instr_indirect_call() { #if defined(__aarch64__) + // the target address is placed on stack + // the identifier of the indirect call site is placed in X1 register + // clang-format off __asm__ __volatile__(SAVE_ALL - "ldp x0, x1, [sp, #288]\n" + "ldr x0, [sp, #272]\n" "bl instrumentIndirectCall\n" RESTORE_ALL "ret\n" @@ -1728,9 +1731,12 @@ extern "C" __attribute((naked)) void __bolt_instr_indirect_call() extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall() { #if defined(__aarch64__) + // the target address is placed on stack + // the identifier of the indirect call site is placed in X1 register + // clang-format off __asm__ __volatile__(SAVE_ALL - "ldp x0, x1, [sp, #288]\n" + "ldr x0, [sp, #272]\n" "bl instrumentIndirectCall\n" RESTORE_ALL "ret\n" diff 
--git a/bolt/runtime/sys_aarch64.h b/bolt/runtime/sys_aarch64.h index b1d04f9d558e0..9cb8e022f58df 100644 --- a/bolt/runtime/sys_aarch64.h +++ b/bolt/runtime/sys_aarch64.h @@ -18,10 +18,12 @@ "stp x24, x25, [sp, #-16]!\n" \ "stp x26, x27, [sp, #-16]!\n" \ "stp x28, x29, [sp, #-16]!\n" \ - "str x30, [sp,#-16]!\n" + "mrs x29, nzcv\n" \ + "stp x29, x30, [sp, #-16]!\n" // Mirrors SAVE_ALL #define RESTORE_ALL \ - "ldr x30, [sp], #16\n" \ + "ldp x29, x30, [sp], #16\n" \ + "msr nzcv, x29\n" \ "ldp x28, x29, [sp], #16\n" \ "ldp x26, x27, [sp], #16\n" \ "ldp x24, x25, [sp], #16\n" \ diff --git a/bolt/test/runtime/AArch64/instrumentation-ind-call.c b/bolt/test/runtime/AArch64/instrumentation-ind-call.c index f9056da333b4e..eddecba4d8b52 100644 --- a/bolt/test/runtime/AArch64/instrumentation-ind-call.c +++ b/bolt/test/runtime/AArch64/instrumentation-ind-call.c @@ -15,9 +15,63 @@ int main() { REQUIRES: system-linux,bolt-runtime RUN: %clang %cflags %s -o %t.exe -Wl,-q -no-pie -fpie +RUN: llvm-objdump --disassemble-symbols=main %t.exe \ +RUN: | FileCheck %s --check-prefix=CHECKINDIRECTREG + +CHECKINDIRECTREG: mov w0, #0xa +CHECKINDIRECTREG-NEXT: mov w1, #0x14 +CHECKINDIRECTREG-NEXT: blr x8 RUN: llvm-bolt %t.exe --instrument --instrumentation-file=%t.fdata \ -RUN: -o %t.instrumented +RUN: -o %t.instrumented \ +RUN: | FileCheck %s --check-prefix=CHECK-INSTR-LOG + +CHECK-INSTR-LOG: BOLT-INSTRUMENTER: Number of indirect call site descriptors: 1 + +RUN: llvm-objdump --disassemble-symbols=main %t.instrumented \ +RUN: | FileCheck %s --check-prefix=CHECK-INSTR-INDIRECTREG + +RUN: llvm-objdump --disassemble-symbols=__bolt_instr_ind_call_handler \ +RUN: %t.instrumented | FileCheck %s --check-prefix=CHECK-INSTR-INDIR-CALL +RUN: llvm-objdump --disassemble-symbols=__bolt_instr_ind_call_handler_func \ +RUN: %t.instrumented | FileCheck %s --check-prefix=CHECK-INSTR-INDIR-CALL-FUNC + +CHECK-INSTR-INDIRECTREG: mov w0, #0xa +CHECK-INSTR-INDIRECTREG-NEXT: mov w1, #0x14 +// store current values 
+CHECK-INSTR-INDIRECTREG-NEXT: stp x0, x1, {{.*}} +// store the indirect target address in x0 +CHECK-INSTR-INDIRECTREG-NEXT: mov x0, x8 +// load callsite id into x1 +CHECK-INSTR-INDIRECTREG-NEXT: movk x1, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: movk x1, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: movk x1, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: movk x1, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: stp x0, x30, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: adrp x8, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: add x8, {{.*}} +// call instrumentation library handler function +CHECK-INSTR-INDIRECTREG-NEXT: blr x8 +// restore registers saved before +CHECK-INSTR-INDIRECTREG-NEXT: ldp x0, x30, {{.*}} +CHECK-INSTR-INDIRECTREG-NEXT: mov x8, x0 +CHECK-INSTR-INDIRECTREG-NEXT: ldp x0, x1, {{.*}} +// original indirect call instruction +CHECK-INSTR-INDIRECTREG-NEXT: blr x8 + + +CHECK-INSTR-INDIR-CALL: __bolt_instr_ind_call_handler>: +CHECK-INSTR-INDIR-CALL-NEXT: ret + +CHECK-INSTR-INDIR-CALL-FUNC: __bolt_instr_ind_call_handler_func>: +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: adrp x0 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: ldr x0 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: cmp x0, #0x0 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: b.eq{{.*}}__bolt_instr_ind_call_handler +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: str x30 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: blr x0 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: ldr x30 +CHECK-INSTR-INDIR-CALL-FUNC-NEXT: b{{.*}}__bolt_instr_ind_call_handler # Instrumented program needs to finish returning zero RUN: %t.instrumented | FileCheck %s -check-prefix=CHECK-OUTPUT diff --git a/bolt/unittests/Core/MCPlusBuilder.cpp b/bolt/unittests/Core/MCPlusBuilder.cpp index 439d72a343ce8..7b6f1620a3f2c 100644 --- a/bolt/unittests/Core/MCPlusBuilder.cpp +++ b/bolt/unittests/Core/MCPlusBuilder.cpp @@ -156,6 +156,8 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 38); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, true)); + BC->MIB->updateBTIVariant(*II, true, 
false); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, false)); MCInst BTIj; BC->MIB->createBTI(BTIj, false, true); @@ -163,6 +165,8 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 36); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, false, true)); + BC->MIB->updateBTIVariant(*II, true, true); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, true)); MCInst BTIc; BC->MIB->createBTI(BTIc, true, false); @@ -170,10 +174,14 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 34); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, false)); + BC->MIB->updateBTIVariant(*II, false, true); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, false, true)); +#ifndef NDEBUG MCInst BTIinvalid; ASSERT_DEATH(BC->MIB->createBTI(BTIinvalid, false, false), "No target kinds!"); +#endif MCInst Paciasp = MCInstBuilder(AArch64::PACIASP); II = BB->addInstruction(Paciasp); diff --git a/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp b/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp index 23dae04916e9b..d210b000dfd33 100644 --- a/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp +++ b/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp @@ -700,6 +700,7 @@ void FormatStringConverter::finalizeFormatText() { /// Append literal parts of the format text, reinstating escapes as required. 
void FormatStringConverter::appendFormatText(const StringRef Text) { for (const char Ch : Text) { + const auto UCh = static_cast(Ch); if (Ch == '\a') StandardFormatString += "\\a"; else if (Ch == '\b') @@ -724,10 +725,10 @@ void FormatStringConverter::appendFormatText(const StringRef Text) { } else if (Ch == '}') { StandardFormatString += "}}"; FormatStringNeededRewriting = true; - } else if (Ch < 32) { + } else if (UCh < 32) { StandardFormatString += "\\x"; - StandardFormatString += llvm::hexdigit(Ch >> 4, true); - StandardFormatString += llvm::hexdigit(Ch & 0xf, true); + StandardFormatString += llvm::hexdigit(UCh >> 4, true); + StandardFormatString += llvm::hexdigit(UCh & 0xf, true); } else StandardFormatString += Ch; } diff --git a/clang-tools-extra/clangd/CompileCommands.cpp b/clang-tools-extra/clangd/CompileCommands.cpp index 7990f2719e9a0..4eda330716f21 100644 --- a/clang-tools-extra/clangd/CompileCommands.cpp +++ b/clang-tools-extra/clangd/CompileCommands.cpp @@ -132,8 +132,7 @@ std::optional detectSysroot() { std::string detectStandardResourceDir() { static int StaticForMainAddr; // Just an address in this process. 
- return CompilerInvocation::GetResourcesPath("clangd", - (void *)&StaticForMainAddr); + return GetResourcesPath("clangd", (void *)&StaticForMainAddr); } // The path passed to argv[0] is important: diff --git a/clang-tools-extra/clangd/Compiler.cpp b/clang-tools-extra/clangd/Compiler.cpp index 6ebc2eac25745..9ea7df139382a 100644 --- a/clang-tools-extra/clangd/Compiler.cpp +++ b/clang-tools-extra/clangd/Compiler.cpp @@ -9,6 +9,7 @@ #include "Compiler.h" #include "support/Logger.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Lex/PreprocessorOptions.h" #include "clang/Serialization/PCHContainerOperations.h" diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index a6f80e3721db1..644c5cb573cf7 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -69,7 +69,7 @@ Potentially Breaking Changes - `CharTypdefsToIgnore` to `CharTypedefsToIgnore` in :doc:`bugprone-signed-char-misuse ` - + - Modified the custom message format of :doc:`bugprone-unsafe-functions ` by assigning a special meaning to the character ``>`` at the start of the value of the option @@ -394,7 +394,7 @@ Changes in existing checks ` check by adding an additional matcher that generalizes the copy-and-swap idiom pattern detection. - + - Improved :doc:`bugprone-unsafe-functions ` check by hiding the default suffix when the reason starts with the character `>` in the `CustomFunctions` @@ -497,7 +497,8 @@ Changes in existing checks - Improved :doc:`modernize-use-std-print ` check to correctly match when the format string is converted to a different type by an implicit - constructor call. + constructor call, and fixed a crash when handling format strings + containing non-ASCII characters. 
- Improved :doc:`performance-unnecessary-copy-initialization ` by printing diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/signal-handler.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/signal-handler.rst index aef27942b9e92..42cfdf0f29eeb 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/bugprone/signal-handler.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/signal-handler.rst @@ -45,11 +45,11 @@ Options Selects which set of functions is considered as asynchronous-safe (and therefore allowed in signal handlers). It can be set to the following values: - ``minimal`` + - `minimal` Selects a minimal set that is defined in the CERT SIG30-C rule. and includes functions ``abort()``, ``_Exit()``, ``quick_exit()`` and ``signal()``. - ``POSIX`` + - `POSIX` Selects a larger set of functions that is listed in POSIX.1-2017 (see `this link `_ @@ -94,4 +94,4 @@ Options The function ``quick_exit`` is not included in the POSIX list but it is included here in the set of safe functions. - The default value is ``POSIX``. + The default value is `POSIX`. diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/switch-missing-default-case.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/switch-missing-default-case.rst index 3ce862ff8afcc..0f0e549091f46 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/bugprone/switch-missing-default-case.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/switch-missing-default-case.rst @@ -51,6 +51,6 @@ Example: on non-enum types where the compiler warnings may not be present. .. seealso:: - The `CppCoreGuideline ES.79 `_ + The `CppCoreGuideline ES.79 `_ provide guidelines on switch statements, including the recommendation to always provide a default case. 
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-capturing-lambda-coroutines.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-capturing-lambda-coroutines.rst index 14e5806625b5b..58bfc35c557dc 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-capturing-lambda-coroutines.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-capturing-lambda-coroutines.rst @@ -8,7 +8,7 @@ use-after-free errors and suggests avoiding captures or ensuring the lambda closure object has a guaranteed lifetime. This check implements `CP.51 -`_ +`_ from the C++ Core Guidelines. Using coroutine lambdas with non-empty capture lists can be risky, as capturing diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-const-or-ref-data-members.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-const-or-ref-data-members.rst index 57c4829431e76..82bd33708792f 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-const-or-ref-data-members.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-const-or-ref-data-members.rst @@ -44,7 +44,7 @@ Examples: }; This check implements `C.12 -`_ +`_ from the C++ Core Guidelines. Further reading: diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-do-while.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-do-while.rst index 299ff1e12e0bf..8b0ee304a67f9 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-do-while.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-do-while.rst @@ -9,7 +9,7 @@ condition is not checked prior to the first iteration. This can lead to subtle bugs. This check implements `ES.75 -`_ +`_ from the C++ Core Guidelines. 
Examples: diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-non-const-global-variables.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-non-const-global-variables.rst index 3d5fef3a07dca..b7d2dc874d4ed 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-non-const-global-variables.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-non-const-global-variables.rst @@ -6,7 +6,7 @@ cppcoreguidelines-avoid-non-const-global-variables Finds non-const global variables as described in `I.2 `_ of C++ Core Guidelines. -As `R.6 `_ +As `R.6 `_ of C++ Core Guidelines is a duplicate of rule `I.2 `_ it also covers that rule. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-reference-coroutine-parameters.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-reference-coroutine-parameters.rst index 3f8bf9f7a9e02..887bdc66d938a 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-reference-coroutine-parameters.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-reference-coroutine-parameters.rst @@ -18,5 +18,5 @@ Examples: } This check implements `CP.53 -`_ +`_ from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/init-variables.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/init-variables.rst index 0465436234b13..e8ca8238887c5 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/init-variables.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/init-variables.rst @@ -8,8 +8,8 @@ value. These may lead to unexpected behavior if there is a code path that reads the variable before assigning to it. This rule is part of the `Type safety (Type.5) -`_ -profile and `ES.20 `_ +`_ +profile and `ES.20 `_ from the C++ Core Guidelines. Only integers, booleans, floats, doubles and pointers are checked. 
The fix diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/interfaces-global-init.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/interfaces-global-init.rst index f8e9da9021773..65ff1db9e4907 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/interfaces-global-init.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/interfaces-global-init.rst @@ -7,7 +7,7 @@ This check flags initializers of globals that access extern objects, and therefore can lead to order-of-initialization problems. This check implements `I.22 -`_ +`_ from the C++ Core Guidelines. Note that currently this does not flag calls to non-constexpr functions, and diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst index 62e38fcd3b9dc..c2af1239a7d4c 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst @@ -43,5 +43,5 @@ Options Specify the function used for forwarding. Default is `::std::forward`. This check implements `F.19 -`_ +`_ from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-malloc.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-malloc.rst index 632a6ad0bc436..6b95604d048f4 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-malloc.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-malloc.rst @@ -10,7 +10,7 @@ Furthermore, it can be configured to check against a user-specified list of functions that are used for memory management (e.g. ``posix_memalign()``). This check implements `R.10 -`_ +`_ from the C++ Core Guidelines. 
There is no attempt made to provide fix-it hints, since manual resource diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-suspend-with-lock.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-suspend-with-lock.rst index 59981f2c8d6d3..81b88e3aedc00 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-suspend-with-lock.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/no-suspend-with-lock.rst @@ -36,5 +36,5 @@ Examples: } This check implements `CP.52 -`_ +`_ from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/owning-memory.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/owning-memory.rst index 91d78002ff709..218de558358fd 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/owning-memory.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/owning-memory.rst @@ -14,7 +14,7 @@ This check implements `I.11 `R.3 `_ and `GSL.Views -`_ +`_ from the C++ Core Guidelines. The definition of a ``gsl::owner`` is straight forward @@ -23,7 +23,7 @@ The definition of a ``gsl::owner`` is straight forward namespace gsl { template owner = T; } It is therefore simple to introduce the owner even without using an implementation of -the `Guideline Support Library `_. +the `Guideline Support Library `_. All checks are purely type based and not (yet) flow sensitive. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-array-to-pointer-decay.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-array-to-pointer-decay.rst index c890e16d2d20f..06de6fcb2c44b 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-array-to-pointer-decay.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-array-to-pointer-decay.rst @@ -9,5 +9,5 @@ Pointers should not be used as arrays. 
``span`` is a bounds-checked, safe alternative to using pointers to access arrays. This rule is part of the `Bounds safety (Bounds 3) -`_ +`_ profile from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-avoid-unchecked-container-access.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-avoid-unchecked-container-access.rst index 99107a33fccb2..fe78ad8056443 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-avoid-unchecked-container-access.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-avoid-unchecked-container-access.rst @@ -31,7 +31,7 @@ excluded from this check (e.g.: ``std::map::operator[]``). This check enforces part of the `SL.con.3 ` guideline and is part of the `Bounds Safety (Bounds 4) -` +` profile from the C++ Core Guidelines. Options diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-constant-array-index.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-constant-array-index.rst index 9b82e0c45a314..4eddeb489a1db 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-constant-array-index.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-constant-array-index.rst @@ -9,7 +9,7 @@ are out of bounds (for ``std::array``). For out-of-bounds checking of static arrays, see the `-Warray-bounds` Clang diagnostic. This rule is part of the `Bounds safety (Bounds 2) -`_ +`_ profile from the C++ Core Guidelines. Optionally, this check can generate fixes using ``gsl::at`` for indexing. 
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-pointer-arithmetic.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-pointer-arithmetic.rst index a3f13714e809c..3f020090f8612 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-pointer-arithmetic.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-bounds-pointer-arithmetic.rst @@ -11,7 +11,7 @@ and easy to get wrong. ``span`` is a bounds-checked, safe type for accessing arrays of data. This rule is part of the `Bounds safety (Bounds 1) -`_ +`_ profile from the C++ Core Guidelines. Options diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-const-cast.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-const-cast.rst index 80584526f13dd..b5cc486aa4a0a 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-const-cast.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-const-cast.rst @@ -22,7 +22,7 @@ situations where the variable's volatility is a crucial aspect of program correctness and reliability. This rule is part of the `Type safety (Type 3) -`_ +`_ profile and `ES.50: Don’t cast away const `_ rule from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-cstyle-cast.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-cstyle-cast.rst index 9f9db39f27197..a94115e9d5e1c 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-cstyle-cast.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-cstyle-cast.rst @@ -15,5 +15,5 @@ the first of the following that is possible: a ``const_cast``, a This rule bans ``(T)expression`` only when used to perform an unsafe cast. This rule is part of the `Type safety (Type.4) -`_ +`_ profile from the C++ Core Guidelines. 
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-member-init.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-member-init.rst index e27ef0572ee67..b86083f82300d 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-member-init.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-member-init.rst @@ -40,5 +40,5 @@ Options Default is `false`. This rule is part of the `Type safety (Type.6) -`_ +`_ profile from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-reinterpret-cast.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-reinterpret-cast.rst index a0946825156fc..c2e4170ed098d 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-reinterpret-cast.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-reinterpret-cast.rst @@ -10,5 +10,5 @@ variable that is actually of type ``X`` to be accessed as if it were of an unrelated type ``Z``. This rule is part of the `Type safety (Type.1.1) -`_ +`_ profile from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-static-cast-downcast.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-static-cast-downcast.rst index 333e6db2aacec..21c014577a003 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-static-cast-downcast.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-static-cast-downcast.rst @@ -12,7 +12,7 @@ variable that is actually of type ``X`` to be accessed as if it were of an unrelated type ``Z``. This rule is part of the `Type safety (Type.2) -`_ +`_ profile from the C++ Core Guidelines. 
Options diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-union-access.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-union-access.rst index 3a5af53331b96..db726a227af31 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-union-access.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-union-access.rst @@ -13,5 +13,5 @@ enforced to be safe in the language and so relies on programmer discipline to get it right. This rule is part of the `Type safety (Type.7) -`_ +`_ profile from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-vararg.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-vararg.rst index c24ff340cb7f3..09809c25c1447 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-vararg.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/pro-type-vararg.rst @@ -15,5 +15,5 @@ because it cannot generally be enforced to be safe in the language and so relies on programmer discipline to get it right. This rule is part of the `Type safety (Type.8) -`_ +`_ profile from the C++ Core Guidelines. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/special-member-functions.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/special-member-functions.rst index 982d16fc8d23d..1b050a55d4c9f 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/special-member-functions.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/special-member-functions.rst @@ -16,7 +16,7 @@ Note that defining a function with ``= delete`` is considered to be a definition. This check implements `C.21 -`_ +`_ from the C++ Core Guidelines. 
Options diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-default-member-init.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-default-member-init.rst index e785f3133e200..55661c003c253 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-default-member-init.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-default-member-init.rst @@ -5,7 +5,7 @@ cppcoreguidelines-use-default-member-init ========================================= -This check implements `C.48 `_ +This check implements `C.48 `_ from the C++ Core Guidelines. The `cppcoreguidelines-use-default-member-init` check is an alias, please see diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-enum-class.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-enum-class.rst index 9e9f4c99dc240..9358996a36eba 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-enum-class.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/use-enum-class.rst @@ -7,7 +7,7 @@ Finds unscoped (non-class) ``enum`` declarations and suggests using ``enum class`` instead. This check implements `Enum.3 -`_ +`_ from the C++ Core Guidelines." Example: diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/virtual-class-destructor.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/virtual-class-destructor.rst index 752fd81359096..80932c416fe01 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/virtual-class-destructor.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/virtual-class-destructor.rst @@ -8,7 +8,7 @@ nor protected and non-virtual. A virtual class's destructor should be specified in one of these ways to prevent undefined behavior. This check implements -`C.35 `_ +`C.35 `_ from the C++ Core Guidelines. 
Note that this check will diagnose a class with a virtual method regardless of diff --git a/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst b/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst index 508b0cac09a91..224ad21ecc5c3 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst @@ -41,8 +41,8 @@ Options Selects what kind of a fix the check should provide. The default is `UseStatic`. - ``None`` + - `None` Don't fix automatically. - ``UseStatic`` + - `UseStatic` Add ``static`` for internal linkage variable and function. diff --git a/clang-tools-extra/docs/clang-tidy/checks/readability/magic-numbers.rst b/clang-tools-extra/docs/clang-tidy/checks/readability/magic-numbers.rst index 0b2d819264daa..55d47d568dcdc 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/readability/magic-numbers.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/readability/magic-numbers.rst @@ -9,7 +9,7 @@ code and not introduced via constants or symbols. Many coding guidelines advise replacing the magic values with symbolic constants to improve readability. 
Here are a few references: - * `Rule ES.45: Avoid "magic constants"; use symbolic constants in C++ Core Guidelines `_ + * `Rule ES.45: Avoid "magic constants"; use symbolic constants in C++ Core Guidelines `_ * `Rule 5.1.1 Use symbolic names instead of literal values in code in High Integrity C++ `_ * Item 17 in "C++ Coding Standards: 101 Rules, Guidelines and Best Practices" by Herb Sutter and Andrei Alexandrescu diff --git a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py index 183b33f135be8..b173ecf4fbdca 100755 --- a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py +++ b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py @@ -398,6 +398,8 @@ def parse_arguments() -> Tuple[argparse.Namespace, List[str]]: def main() -> None: + sys.stdout.reconfigure(encoding="utf-8") + sys.stderr.reconfigure(encoding="utf-8") args, extra_args = parse_arguments() abbreviated_stds = args.std diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-std-print.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-std-print.cpp index ec37f077df7fc..63972cc0fd25e 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-std-print.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-std-print.cpp @@ -54,6 +54,12 @@ void printf_deceptive_newline() { // CHECK-FIXES: std::println("Hello"); } +void printf_utf8_text() { + printf("你好世界\n"); + // CHECK-MESSAGES: [[@LINE-1]]:3: warning: use 'std::println' instead of 'printf' [modernize-use-std-print] + // CHECK-FIXES: std::println("你好世界"); +} + void printf_crlf_newline() { printf("Hello\r\n"); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: use 'std::print' instead of 'printf' [modernize-use-std-print] @@ -303,6 +309,12 @@ void fprintf_simple() { // CHECK-FIXES: std::print(stderr, "Hello"); } +void fprintf_utf8_text() { + fprintf(stderr, "你好世界\n"); + // CHECK-MESSAGES: [[@LINE-1]]:3: warning: use 'std::println' instead of 'fprintf' 
[modernize-use-std-print] + // CHECK-FIXES: std::println(stderr, "你好世界"); +} + void std_printf_simple() { std::printf("std::Hello"); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: use 'std::print' instead of 'printf' [modernize-use-std-print] diff --git a/clang/cmake/caches/Fuchsia-stage2.cmake b/clang/cmake/caches/Fuchsia-stage2.cmake index be3d0cfa2e657..9df14d444eed6 100644 --- a/clang/cmake/caches/Fuchsia-stage2.cmake +++ b/clang/cmake/caches/Fuchsia-stage2.cmake @@ -58,7 +58,7 @@ set(CMAKE_CXX_VISIBILITY_PRESET default CACHE STRING "") set(CMAKE_BUILD_TYPE Release CACHE STRING "") if (APPLE) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.13" CACHE STRING "") + set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "") elseif(WIN32) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded" CACHE STRING "") endif() @@ -83,7 +83,7 @@ if(APPLE) set(LIBCXX_ENABLE_STATIC_ABI_LIBRARY ON CACHE BOOL "") set(LIBCXX_HARDENING_MODE "none" CACHE STRING "") set(LIBCXX_USE_COMPILER_RT ON CACHE BOOL "") - set(RUNTIMES_CMAKE_ARGS "-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13;-DCMAKE_OSX_ARCHITECTURES=arm64|x86_64" CACHE STRING "") + set(RUNTIMES_CMAKE_ARGS "-DCMAKE_OSX_DEPLOYMENT_TARGET=11.0;-DCMAKE_OSX_ARCHITECTURES=arm64|x86_64" CACHE STRING "") endif() if(WIN32 OR LLVM_WINSYSROOT) diff --git a/clang/cmake/caches/Fuchsia.cmake b/clang/cmake/caches/Fuchsia.cmake index 46ae7c603f67a..83ff4ccc167d1 100644 --- a/clang/cmake/caches/Fuchsia.cmake +++ b/clang/cmake/caches/Fuchsia.cmake @@ -97,7 +97,7 @@ set(LLVM_ENABLE_ASSERTIONS ON CACHE BOOL "") set(LLVM_ENABLE_BACKTRACES ON CACHE BOOL "") set(CMAKE_BUILD_TYPE Release CACHE STRING "") if(APPLE) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.13" CACHE STRING "") + set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "") elseif(WIN32) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded" CACHE STRING "") endif() @@ -140,7 +140,7 @@ else() set(SANITIZER_TEST_CXX "libc++" CACHE STRING "") set(SANITIZER_TEST_CXX_INTREE ON CACHE BOOL "") set(LLVM_ENABLE_RUNTIMES 
"compiler-rt;libcxx;libcxxabi;libunwind" CACHE STRING "") - set(RUNTIMES_CMAKE_ARGS "-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13;-DCMAKE_OSX_ARCHITECTURES=arm64|x86_64" CACHE STRING "") + set(RUNTIMES_CMAKE_ARGS "-DCMAKE_OSX_DEPLOYMENT_TARGET=11.0;-DCMAKE_OSX_ARCHITECTURES=arm64|x86_64" CACHE STRING "") endif() if(BOOTSTRAP_CMAKE_SYSTEM_NAME) diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst index 94d6f0d27619f..4f81a084dd65b 100644 --- a/clang/docs/ClangFormatStyleOptions.rst +++ b/clang/docs/ClangFormatStyleOptions.rst @@ -4765,9 +4765,21 @@ the configuration (without a prefix: ``Auto``). Decimal: 3 Hex: -1 - You can also specify a minimum number of digits (``BinaryMinDigits``, - ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must - have in order for the separators to be inserted. + You can also specify a minimum number of digits + (``BinaryMinDigitsInsert``, ``DecimalMinDigitsInsert``, and + ``HexMinDigitsInsert``) the integer literal must have in order for the + separators to be inserted, and a maximum number of digits + (``BinaryMaxDigitsRemove``, ``DecimalMaxDigitsRemove``, and + ``HexMaxDigitsRemove``) until the separators are removed. This divides the + literals in 3 regions, always without separator (up until including + ``xxxMaxDigitsRemove``), maybe with, or without separators (up until + excluding ``xxxMinDigitsInsert``), and finally always with separators. + + .. note:: + + ``BinaryMinDigits``, ``DecimalMinDigits``, and ``HexMinDigits`` are + deprecated and renamed to ``BinaryMinDigitsInsert``, + ``DecimalMinDigitsInsert``, and ``HexMinDigitsInsert``, respectively. * ``int8_t Binary`` Format separators in binary literals. @@ -4778,15 +4790,28 @@ the configuration (without a prefix: ``Auto``). /* 3: */ b = 0b100'111'101'101; /* 4: */ b = 0b1001'1110'1101; - * ``int8_t BinaryMinDigits`` Format separators in binary literals with a minimum number of digits. 
+ * ``int8_t BinaryMinDigitsInsert`` Format separators in binary literals with a minimum number of digits. .. code-block:: text // Binary: 3 - // BinaryMinDigits: 7 + // BinaryMinDigitsInsert: 7 b1 = 0b101101; b2 = 0b1'101'101; + * ``int8_t BinaryMaxDigitsRemove`` Remove separators in binary literals with a maximum number of digits. + + .. code-block:: text + + // Binary: 3 + // BinaryMinDigitsInsert: 7 + // BinaryMaxDigitsRemove: 4 + b0 = 0b1011; // Always removed. + b1 = 0b101101; // Not added. + b2 = 0b1'01'101; // Not removed, not corrected. + b3 = 0b1'101'101; // Always added. + b4 = 0b10'1101; // Corrected to 0b101'101. + * ``int8_t Decimal`` Format separators in decimal literals. .. code-block:: text @@ -4795,15 +4820,28 @@ the configuration (without a prefix: ``Auto``). /* 0: */ d = 184467'440737'0'95505'92ull; /* 3: */ d = 18'446'744'073'709'550'592ull; - * ``int8_t DecimalMinDigits`` Format separators in decimal literals with a minimum number of digits. + * ``int8_t DecimalMinDigitsInsert`` Format separators in decimal literals with a minimum number of digits. .. code-block:: text // Decimal: 3 - // DecimalMinDigits: 5 + // DecimalMinDigitsInsert: 5 d1 = 2023; d2 = 10'000; + * ``int8_t DecimalMaxDigitsRemove`` Remove separators in decimal literals with a maximum number of digits. + + .. code-block:: text + + // Decimal: 3 + // DecimalMinDigitsInsert: 7 + // DecimalMaxDigitsRemove: 4 + d0 = 2023; // Always removed. + d1 = 123456; // Not added. + d2 = 1'23'456; // Not removed, not corrected. + d3 = 5'000'000; // Always added. + d4 = 1'23'45; // Corrected to 12'345. + * ``int8_t Hex`` Format separators in hexadecimal literals. .. code-block:: text @@ -4812,16 +4850,30 @@ the configuration (without a prefix: ``Auto``). 
/* 0: */ h = 0xDEAD'BEEF'DE'AD'BEE'Fuz; /* 2: */ h = 0xDE'AD'BE'EF'DE'AD'BE'EFuz; - * ``int8_t HexMinDigits`` Format separators in hexadecimal literals with a minimum number of + * ``int8_t HexMinDigitsInsert`` Format separators in hexadecimal literals with a minimum number of digits. .. code-block:: text // Hex: 2 - // HexMinDigits: 6 + // HexMinDigitsInsert: 6 h1 = 0xABCDE; h2 = 0xAB'CD'EF; + * ``int8_t HexMaxDigitsRemove`` Remove separators in hexadecimal literals with a maximum number of + digits. + + .. code-block:: text + + // Hex: 2 + // HexMinDigitsInsert: 6 + // HexMaxDigitsRemove: 4 + h0 = 0xAFFE; // Always removed. + h1 = 0xABCDE; // Not added. + h2 = 0xABC'DE; // Not removed, not corrected. + h3 = 0xAB'CD'EF; // Always added. + h4 = 0xABCD'E; // Corrected to 0xA'BC'DE. + .. _JavaImportGroups: diff --git a/clang/docs/ClangStaticAnalyzer.rst b/clang/docs/ClangStaticAnalyzer.rst index 7a309dc4acd91..9ab485f6800f8 100644 --- a/clang/docs/ClangStaticAnalyzer.rst +++ b/clang/docs/ClangStaticAnalyzer.rst @@ -5,9 +5,9 @@ Clang Static Analyzer The Clang Static Analyzer is a source code analysis tool that finds bugs in C, C++, and Objective-C programs. It implements *path-sensitive*, *inter-procedural analysis* based on *symbolic execution* technique. -This is the Static Analyzer documentation page. +The Static Analyzer is a part of Clang; for downloading and installing Clang visit the `LLVM releases page `_. -See the `Official Tool Page `_. +This is the documentation page of the Static Analyzer; there is also an old `Official Tool Page `_ which provides a short overview of features and limitations. .. 
toctree:: :caption: Table of Contents diff --git a/clang/docs/HIPSupport.rst b/clang/docs/HIPSupport.rst index 92ea07974373e..6415bc8f248b2 100644 --- a/clang/docs/HIPSupport.rst +++ b/clang/docs/HIPSupport.rst @@ -376,6 +376,43 @@ Example Usage basePtr->virtualFunction(); // Allowed since obj is constructed in device code } +Alias Attribute Support +======================= + +Clang supports alias attributes in HIP code, allowing creation of alternative names for functions and variables. + - Aliases work with ``__host__``, ``__device__``, and ``__host__ __device__`` functions and variables. + - The alias attribute uses the syntax ``__attribute__((alias("target_name")))``. Both weak and strong aliases are supported. + - Outside of ``extern "C"``, the alias target must use the mangled name of the aliasee + - The alias is only emitted if the aliasee is emitted on the same side (ie __host__ or __device__), otherwise it is ignored. + +Example Usage +------------- + +.. code-block:: c++ + + extern "C" { + // Host function alias + int __HostFunc(void) { return 0; } + int HostFunc(void) __attribute__((weak, alias("__HostFunc"))); + + // Device function alias + __device__ int __DeviceFunc(void) { return 1; } + __device__ int DeviceFunc(void) __attribute__((weak, alias("__DeviceFunc"))); + + // Host-device function alias + __host__ __device__ int __BothFunc(void) { return 2; } + __host__ __device__ int BothFunc(void) __attribute__((alias("__BothFunc"))); + + // Variable alias + int __host_var = 3; + extern int __attribute__((weak, alias("__host_var"))) host_var; + } + // Mangled / overload alias + __host__ __device__ float __Four(float f) { return 2.0f * f; } + __host__ __device__ int Four(void) __attribute__((weak, alias("_Z6__Fourv"))); + __host__ __device__ float Four(float f) __attribute__((weak, alias("_Z6__Fourf"))); + + Host and Device Attributes of Default Destructors =================================================== diff --git a/clang/docs/LanguageExtensions.rst 
b/clang/docs/LanguageExtensions.rst index a3db3e5d356b3..80cea2166bc83 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -4854,6 +4854,14 @@ memory scope argument. These are designed to be a generic alternative to the ``__opencl_atomic_*`` builtin functions for targets that support atomic memory scopes. +Clang provides two additional __scoped_atomic builtins: + +* ``__scoped_atomic_uinc_wrap`` +* ``__scoped_atomic_udec_wrap`` + +See LLVM IR `atomicrmw `_ +instruction for the semantics of uinc_wrap and udec_wrap. + Atomic memory scopes are designed to assist optimizations for systems with several levels of memory hierarchy like GPUs. The following memory scopes are currently supported: diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst index f7e6061044c6d..e7ca7b0bd0792 100644 --- a/clang/docs/OpenMPSupport.rst +++ b/clang/docs/OpenMPSupport.rst @@ -580,6 +580,8 @@ implementation. | need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 | | | | | https://github.com/llvm/llvm-project/pull/149586 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ +| need_device_ptr modifier for adjust_args clause | :part:`unclaimed` | :none:`unclaimed` | | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | Prescriptive num_threads | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/160659 | | | | | https://github.com/llvm/llvm-project/pull/146403 | | | | | https://github.com/llvm/llvm-project/pull/146404 | @@ -631,7 +633,9 @@ implementation. 
| | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, | | | | | https://github.com/llvm/llvm-project/pull/158370) | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ - +| need_device_ptr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Clang Parsing/Sema: https://github.com/llvm/llvm-project/pull/168905 | +| | | | https://github.com/llvm/llvm-project/pull/169558 | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ OpenMP Extensions ================= diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 51f07256c5d9f..da064534c25d9 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -84,6 +84,8 @@ Potentially Breaking Changes - Downstream projects that previously linked only against ``clangDriver`` may now (also) need to link against the new ``clangOptions`` library, since options-related code has been moved out of the Driver into a separate library. +- The ``clangFrontend`` library no longer depends on ``clangDriver``, which may + break downstream projects that relied on this transitive dependency. C/C++ Language Potentially Breaking Changes ------------------------------------------- @@ -230,6 +232,8 @@ C23 Feature Support Non-comprehensive list of changes in this release ------------------------------------------------- +- Added ``__scoped_atomic_uinc_wrap`` and ``__scoped_atomic_udec_wrap``. + - Removed OpenCL header-only feature macros (previously unconditionally enabled on SPIR-V and only selectively disabled via ``-D__undef_``). 
All OpenCL extensions and features are now centralized in OpenCLExtensions.def, @@ -392,6 +396,7 @@ Improvements to Clang's diagnostics - Fixed false positives in ``-Waddress-of-packed-member`` diagnostics when potential misaligned members get processed before they can get discarded. (#GH144729) +- Fix a false positive warning in ``-Wignored-qualifiers`` when the return type is undeduced. (#GH43054) - Clang now emits a diagnostic with the correct message in case of assigning to const reference captured in lambda. (#GH105647) @@ -513,6 +518,8 @@ Bug Fixes to Attribute Support - Fix handling of parameter indexes when an attribute is applied to a C++23 explicit object member function. - Fixed several false positives and false negatives in function effect (`nonblocking`) analysis. (#GH166078) (#GH166101) (#GH166110) - Fix ``cleanup`` attribute by delaying type checks until after the type is deduced. (#GH129631) +- Fix a crash when instantiating a function template with ``constructor`` or ``destructor`` + attributes without a priority argument. (#GH169072) Bug Fixes to C++ Support ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -699,6 +706,9 @@ clang-format ``AlignAfterOpenBracket`` option, and make ``AlignAfterOpenBracket`` a ``bool`` type. - Add ``AlignPPAndNotPP`` suboption to ``AlignTrailingComments``. +- Rename ``(Binary|Decimal|Hex)MinDigits`` to ``...MinDigitsInsert`` and add + ``(Binary|Decimal|Hex)MaxDigitsSeparator`` suboptions to + ``IntegerLiteralSeparator``. libclang -------- @@ -759,6 +769,9 @@ OpenMP Support - Updated parsing and semantic analysis support for ``nowait`` clause to accept optional argument in OpenMP >= 60. - Added support for ``default`` clause on ``target`` directive. +- Added parsing and semantic analysis support for ``need_device_ptr`` modifier + to accept an optional fallback argument (``fb_nullify`` or ``fb_preserve``) + with OpenMP >= 61. 
Improvements ^^^^^^^^^^^^ diff --git a/clang/docs/analyzer/user-docs.rst b/clang/docs/analyzer/user-docs.rst index 67c1dfaa40965..1ea193b1a651b 100644 --- a/clang/docs/analyzer/user-docs.rst +++ b/clang/docs/analyzer/user-docs.rst @@ -6,7 +6,6 @@ Contents: .. toctree:: :maxdepth: 2 - user-docs/Installation user-docs/CommandLineUsage user-docs/Options user-docs/UsingWithXCode diff --git a/clang/docs/analyzer/user-docs/CommandLineUsage.rst b/clang/docs/analyzer/user-docs/CommandLineUsage.rst index 0252de80b788f..089a8ce68ce0a 100644 --- a/clang/docs/analyzer/user-docs/CommandLineUsage.rst +++ b/clang/docs/analyzer/user-docs/CommandLineUsage.rst @@ -16,18 +16,19 @@ It is possible, however, to invoke the static analyzer from the command line in The following tools are used commonly to run the analyzer from the command line. Both tools are wrapper scripts to drive the analysis and the underlying invocations of the Clang compiler: -1. scan-build_ is an old and simple command line tool that emits static analyzer warnings as HTML files while compiling your project. You can view the analysis results in your web browser. +1. scan-build_ is an old and simple command line tool that emits static analyzer warnings as HTML files while compiling your project. You can view the analysis results in your web browser; the utility script ``scan-view`` can provide a trivial HTTP server that servers these result files. + - Is available as a part of the LLVM project (together with ``scan-view``). - Useful for individual developers who simply want to view static analysis results at their desk, or in a very simple collaborative environment. - Works on all major platforms (Windows, Linux, macOS) and is available as a package in many Linux distributions. - Does not include support for cross-translation-unit analysis. 2. CodeChecker_ is a driver and web server that runs the static analyzer on your projects on demand and maintains a database of issues. + - Open source, but out-of-tree, i.e. 
not part of the LLVM project. - Perfect for managing large amounts of thee static analyzer warnings in a collaborative environment. - Generally much more feature-rich than scan-build. - Supports incremental analysis: Results can be stored in a database, subsequent analysis runs can be compared to list the newly added defects. - :doc:`CrossTranslationUnit` is supported fully on Linux via CodeChecker. - - Can run clang-tidy checkers too. - - Open source, but out-of-tree, i.e. not part of the LLVM project. + - Can also run clang-tidy checks and various other analysis tools. scan-build ---------- diff --git a/clang/docs/analyzer/user-docs/Installation.rst b/clang/docs/analyzer/user-docs/Installation.rst index d84007328e5dc..8d2b3aca6e474 100644 --- a/clang/docs/analyzer/user-docs/Installation.rst +++ b/clang/docs/analyzer/user-docs/Installation.rst @@ -1,37 +1,6 @@ +:orphan: + Obtaining the Static Analyzer ============================= -This page describes how to download and install the analyzer. Once the analyzer is installed, follow the :doc:`CommandLineUsage` on using the command line to get started analyzing your code. - -.. contents:: - :local: - - -Building the Analyzer from Source ---------------------------------- - -Currently there are no officially supported binary distributions for the static analyzer. -You must build Clang and LLVM manually. -To do so, please follow the instructions for `building Clang from source code `_. - -Once the Clang is built, you need to add the location of the ``clang`` binary and the locations of the command line utilities (`CodeChecker` or ``scan-build`` and ``scan-view``) to you PATH for :doc:`CommandLineUsage`. - -[Legacy] Packaged Builds (Mac OS X) ------------------------------------ - -Semi-regular pre-built binaries of the analyzer used to be available on Mac OS X. These were built to run on OS X 10.7 and later. - -For older builds for MacOS visit https://clang-analyzer.llvm.org/release_notes.html. 
- -Packaged builds for other platforms may eventually be provided, but we need volunteers who are willing to help provide such regular builds. If you wish to help contribute regular builds of the analyzer on other platforms, please get in touch via `LLVM Discourse `_. - -[Legacy] Using Packaged Builds ------------------------------- - -To use the legacy package builds, simply unpack it anywhere. If the build archive has the name **``checker-XXX.tar.bz2``** then the archive will expand to a directory called **``checker-XXX``**. You do not need to place this directory or the contents of this directory in any special place. Uninstalling the analyzer is as simple as deleting this directory. - -Most of the files in the **``checker-XXX``** directory will be supporting files for the analyzer that you can simply ignore. Most users will only care about two files, which are located at the top of the **``checker-XXX``** directory: - -* **scan-build**: ``scan-build`` is the high-level command line utility for running the analyzer -* **scan-view**: ``scan-view`` a companion command line utility to ``scan-build``, ``scan-view`` is used to view analysis results generated by ``scan-build``. There is an option that one can pass to ``scan-build`` to cause ``scan-view`` to run as soon as it the analysis of a build completes - +The Static Analyzer can be obtained as a part of Clang; for downloading and installing Clang visit the `LLVM releases page `_. Once the analyzer is installed, follow the :doc:`CommandLineUsage` on using the command line to get started analyzing your code. 
diff --git a/clang/include/clang/AST/CXXInheritance.h b/clang/include/clang/AST/CXXInheritance.h index e89326081a180..72d365bfbc1f3 100644 --- a/clang/include/clang/AST/CXXInheritance.h +++ b/clang/include/clang/AST/CXXInheritance.h @@ -192,7 +192,7 @@ class CXXBasePaths { /// Determine whether the path from the most-derived type to the /// given base type is ambiguous (i.e., it refers to multiple subobjects of /// the same base type). - bool isAmbiguous(CanQualType BaseType); + bool isAmbiguous(CanQualType BaseType) const; /// Whether we are finding multiple paths to detect ambiguities. bool isFindingAmbiguities() const { return FindAmbiguities; } diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h index f02969e0a9563..1a16fb82f9a84 100644 --- a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h +++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h @@ -38,6 +38,11 @@ bool isAssignmentOperatorLifetimeBound(const CXXMethodDecl *CMD); /// method or because it's a normal assignment operator. bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD); +// Tells whether the type is annotated with [[gsl::Pointer]]. +bool isGslPointerType(QualType QT); +// Tells whether the type is annotated with [[gsl::Owner]]. 
+bool isGslOwnerType(QualType QT); + } // namespace clang::lifetimes #endif // LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMEANNOTATIONS_H diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index 7530aebdcb581..6d6104a3ddb8d 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -2290,6 +2290,18 @@ def ScopedAtomicMaxFetch : AtomicBuiltin { let Prototype = "void(...)"; } +def ScopedAtomicUIncWrap : AtomicBuiltin { + let Spellings = ["__scoped_atomic_uinc_wrap"]; + let Attributes = [CustomTypeChecking]; + let Prototype = "void(...)"; +} + +def ScopedAtomicUDecWrap : AtomicBuiltin { + let Spellings = ["__scoped_atomic_udec_wrap"]; + let Attributes = [CustomTypeChecking]; + let Prototype = "void(...)"; +} + // OpenCL 2.0 atomic builtins. def OpenCLAtomicInit : AtomicBuiltin { let Spellings = ["__opencl_atomic_init"]; diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def index a3ded0f6a9983..8af6ce1528a45 100644 --- a/clang/include/clang/Basic/BuiltinsAMDGPU.def +++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -187,8 +187,8 @@ TARGET_BUILTIN(__builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f32, "ffQbiiIi", "", TARGET_BUILTIN(__builtin_amdgcn_raw_ptr_buffer_atomic_fmin_f64, "ddQbiiIi", "", "atomic-fmin-fmax-global-f64") TARGET_BUILTIN(__builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f64, "ddQbiiIi", "", "atomic-fmin-fmax-global-f64") -TARGET_BUILTIN(__builtin_amdgcn_raw_ptr_buffer_load_lds, "vQbv*3IUiiiIiIi", "t", "vmem-to-lds-load-insts") -TARGET_BUILTIN(__builtin_amdgcn_struct_ptr_buffer_load_lds, "vQbv*3IUiiiiIiIi", "t", "vmem-to-lds-load-insts") +TARGET_BUILTIN(__builtin_amdgcn_raw_ptr_buffer_load_lds, "vQbv*3IUiiiIiIi", "", "vmem-to-lds-load-insts") +TARGET_BUILTIN(__builtin_amdgcn_struct_ptr_buffer_load_lds, "vQbv*3IUiiiiIiIi", "", "vmem-to-lds-load-insts") //===----------------------------------------------------------------------===// // Ballot 
builtins. @@ -286,7 +286,7 @@ TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", " TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "atomic-ds-pk-add-16-insts") TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", "atomic-ds-pk-add-16-insts") TARGET_BUILTIN(__builtin_amdgcn_load_to_lds, "vv*v*3IUiIiIUi", "", "vmem-to-lds-load-insts") -TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3IUiIiIUi", "t", "vmem-to-lds-load-insts") +TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3IUiIiIUi", "", "vmem-to-lds-load-insts") //===----------------------------------------------------------------------===// // Deep learning builtins. diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td index 4aa3d51931980..98cea35beb0ea 100644 --- a/clang/include/clang/Basic/BuiltinsX86.td +++ b/clang/include/clang/Basic/BuiltinsX86.td @@ -156,8 +156,6 @@ let Features = "sse", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in def rcpss : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">; def rsqrtps : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">; def rsqrtss : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">; - def sqrtps : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">; - def sqrtss : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">; } let Features = "sse2", Attributes = [NoThrow, RequiredVectorWidth<128>] in { @@ -170,8 +168,6 @@ let Features = "sse2", Attributes = [NoThrow] in { let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { def psadbw128 : X86Builtin<"_Vector<2, long long int>(_Vector<16, char>, _Vector<16, char>)">; - def sqrtpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>)">; - def sqrtsd : X86Builtin<"_Vector<2, double>(_Vector<2, double>)">; def cvtpd2dq : X86Builtin<"_Vector<2, long long int>(_Vector<2, double>)">; def cvtpd2ps : X86Builtin<"_Vector<4, float>(_Vector<2, double>)">; def 
cvttpd2dq : X86Builtin<"_Vector<4, int>(_Vector<2, double>)">; @@ -214,17 +210,6 @@ let Header = "emmintrin.h", Attributes = [NoThrow, RequireDeclaration] in { def _mm_pause : X86LibBuiltin<"void()">; } -let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def psraw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; - def psrad128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; - def psrlw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; - def psrld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; - def psrlq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">; - def psllw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; - def pslld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; - def psllq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">; -} - let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { def movmskpd : X86Builtin<"int(_Vector<2, double>)">; def pmovmskb128 : X86Builtin<"int(_Vector<16, char>)">; @@ -265,6 +250,15 @@ let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">; def vec_ext_v8hi : X86Builtin<"short(_Vector<8, short>, _Constant int)">; def vec_set_v8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, short, _Constant int)">; + + def psraw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; + def psrad128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; + def psrlw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; + def psrld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; + def psrlq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, 
_Vector<2, long long int>)">; + def psllw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; + def pslld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">; + def psllq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">; } let Features = "sse3", Attributes = [NoThrow] in { @@ -513,8 +507,6 @@ let Features = "avx", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWid } let Features = "avx", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def sqrtpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>)">; - def sqrtps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>)">; def rsqrtps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>)">; def rcpps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>)">; def roundpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Constant int)">; @@ -585,14 +577,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i def psadbw256 : X86Builtin< "_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">; - def psllw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; - def pslld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; - def psllq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; - def psraw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; - def psrad256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; - def psrlw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; - def psrld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; - def psrlq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; def permdf256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Constant int)">; def permti256 : X86Builtin<"_Vector<4, long long 
int>(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int)">; def permdi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">; @@ -669,6 +653,15 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi def permvarsi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">; def permvarsf256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, int>)">; + + def psllw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; + def pslld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; + def psllq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; + def psraw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; + def psrad256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; + def psrlw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">; + def psrld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">; + def psrlq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; } let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { @@ -716,11 +709,13 @@ let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in { def gatherq_d : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<2, long long int>, _Vector<4, int>, _Constant char)">; } -let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { +let Features = "f16c", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { def vcvtps2ph : X86Builtin<"_Vector<8, short>(_Vector<4, float>, _Constant int)">; } -let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { +let Features = "f16c", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { def vcvtps2ph256 : 
X86Builtin<"_Vector<8, short>(_Vector<8, float>, _Constant int)">; } @@ -1930,16 +1925,13 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVect def prorq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">; } -let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { - def psllw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">; -} - let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def pmaddubsw512 : X86Builtin<"_Vector<32, short>(_Vector<64, char>, _Vector<64, char>)">; def pmaddwd512 : X86Builtin<"_Vector<16, int>(_Vector<32, short>, _Vector<32, short>)">; def psllv32hi : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">; def pshufhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Constant int)">; def pshuflw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Constant int)">; + def psllw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">; } let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { @@ -1995,7 +1987,7 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVect def psravq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">; } -let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { +let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def psraw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">; def psrlw512 @@ -2312,25 +2304,17 @@ let Features = "avx512f", def psraqi512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, int)">; } -let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { +let Features = "avx512vl", Attributes = [NoThrow, Const, 
Constexpr, RequiredVectorWidth<128>] in { def psraq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">; -} - -let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def psraq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; -} - -let Features = "avx512vl", - Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { def psraqi128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, int)">; } -let Features = "avx512vl", - Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { +let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { + def psraq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">; def psraqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">; } -let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { +let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def pslld512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<4, int>)">; def psllq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<2, long long int>)">; def psrad512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<4, int>)">; @@ -2371,7 +2355,8 @@ let Features = "avx512vl", def pternlogq256_maskz : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">; } -let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { +let Features = "avx512f", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def shuf_f32x4 : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<16, float>, _Constant int)">; def shuf_f64x2 : X86Builtin<"_Vector<8, 
double>(_Vector<8, double>, _Vector<8, double>, _Constant int)">; def shuf_i32x4 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Constant int)">; @@ -2391,7 +2376,8 @@ let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVecto : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<16, int>)">; } -let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { +let Features = "avx512vl", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { def shuf_f32x4_256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Constant int)">; def shuf_f64x2_256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Constant int)">; def shuf_i32x4_256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Constant int)">; @@ -3308,15 +3294,15 @@ let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<128> def cvtusi2ss32 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, unsigned int, _Constant int)">; } -let Features = "avx512vbmi", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { +let Features = "avx512vbmi", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def vpmultishiftqb512 : X86Builtin<"_Vector<64, char>(_Vector<64, char>, _Vector<64, char>)">; } -let Features = "avx512vbmi,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { +let Features = "avx512vbmi,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { def vpmultishiftqb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">; } -let Features = "avx512vbmi,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { +let Features = "avx512vbmi,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { def vpmultishiftqb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">; } @@ -3356,10 +3342,6 @@ let 
Features = "avx512bf16", Attributes = [NoThrow, Const, RequiredVectorWidth<5 def dpbf16ps_512 : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<32, __bf16>, _Vector<32, __bf16>)">; } -let Features = "avx512bf16", Attributes = [NoThrow, Const] in { - def cvtsbf162ss_32 : X86Builtin<"float(__bf16)">; -} - let Features = "avx512vp2intersect", Attributes = [NoThrow, RequiredVectorWidth<512>] in { def vp2intersect_q_512 : X86Builtin<"void(_Vector<8, long long int>, _Vector<8, long long int>, unsigned char *, unsigned char *)">; } @@ -3537,14 +3519,6 @@ let Features = "avx512fp16", Attributes = [NoThrow, Const, RequiredVectorWidth<1 def reducesh_mask : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>, _Vector<8, _Float16>, _Vector<8, _Float16>, unsigned char, _Constant int, _Constant int)">; } -let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def sqrtph : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>)">; -} - -let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def sqrtph256 : X86Builtin<"_Vector<16, _Float16>(_Vector<16, _Float16>)">; -} - let Features = "avx512fp16", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { def sqrtph512 : X86Builtin<"_Vector<32, _Float16>(_Vector<32, _Float16>, _Constant int)">; } @@ -5063,15 +5037,3 @@ let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256> let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { def vgetmantbf16512_mask : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>, _Constant int, _Vector<32, __bf16>, unsigned int)">; } - -let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vsqrtbf16 : X86Builtin<"_Vector<8, __bf16>(_Vector<8, __bf16>)">; -} - -let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vsqrtbf16256 : X86Builtin<"_Vector<16, 
__bf16>(_Vector<16, __bf16>)">; -} - -let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { - def vsqrtbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>)">; -} diff --git a/clang/include/clang/Basic/DebugOptions.def b/clang/include/clang/Basic/DebugOptions.def index ea3636ffa1af1..34f5a313947a4 100644 --- a/clang/include/clang/Basic/DebugOptions.def +++ b/clang/include/clang/Basic/DebugOptions.def @@ -65,6 +65,9 @@ DEBUGOPT(DebugKeyInstructions, 1, 0, Benign) DEBUGOPT(DebugColumnInfo, 1, 0, Compatible) ///< Whether or not to use column information ///< in debug info. +/// Whether or not to include call site information in debug info. +DEBUGOPT(DebugCallSiteInfo, 1, 1, Benign) + DEBUGOPT(DebugTypeExtRefs, 1, 0, Compatible) ///< Whether or not debug info should contain ///< external references to a PCH or module. diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index f262db55a0d92..aeffe96e806bd 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -851,9 +851,6 @@ def warn_drv_sarif_format_unstable : Warning< "diagnostic formatting in SARIF mode is currently unstable">, InGroup>; -def err_drv_riscv_unsupported_with_linker_relaxation : Error< - "%0 is unsupported with RISC-V linker relaxation (-mrelax)">; - def warn_drv_loongarch_conflicting_implied_val : Warning< "ignoring '%0' as it conflicts with that implied by '%1' (%2)">, InGroup; diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index 53aa86a7dabde..4a145fd71eedd 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -8661,6 +8661,8 @@ def err_conditional_vector_size : Error< def err_conditional_vector_element_size : Error< "vector condition type %0 and result type %1 do not have elements of the " "same 
size">; +def err_conditional_vector_scalar_type_unsupported : Error< + "scalar type %0 not supported with vector condition type %1">; def err_conditional_vector_has_void : Error< "GNU vector conditional operand cannot be %select{void|a throw expression}0">; def err_conditional_vector_operand_type diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td index d2b7b78b9970f..8dc40a665bd9a 100644 --- a/clang/include/clang/Basic/arm_sve.td +++ b/clang/include/clang/Basic/arm_sve.td @@ -993,7 +993,7 @@ def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfdb", MergeNo def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_ext", [VerifyRuntimeMode], [ImmCheck<2, ImmCheckExtract, 1>]>; defm SVLASTA : SVEPerm<"svlasta[_{d}]", "sPd", "aarch64_sve_lasta">; defm SVLASTB : SVEPerm<"svlastb[_{d}]", "sPd", "aarch64_sve_lastb">; -def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_rev", [VerifyRuntimeMode]>; +def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfdb", MergeNone, "vector_reverse", [VerifyRuntimeMode]>; def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_sel", [VerifyRuntimeMode]>; def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_splice", [VerifyRuntimeMode]>; def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_tbl", [VerifyRuntimeMode]>; @@ -1009,7 +1009,7 @@ def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNon def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_zip1", [VerifyRuntimeMode]>; def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_zip2", [VerifyRuntimeMode]>; -def SVREV_B8 : SInst<"svrev_b8", "PP", "Pc", MergeNone, "aarch64_sve_rev", [VerifyRuntimeMode]>; +def SVREV_B8 : SInst<"svrev_b8", "PP", "Pc", MergeNone, "vector_reverse", [VerifyRuntimeMode]>; def 
SVREV_B16 : SInst<"svrev_b16", "PP", "Pc", MergeNone, "aarch64_sve_rev_b16", [IsOverloadNone, VerifyRuntimeMode]>; def SVREV_B32 : SInst<"svrev_b32", "PP", "Pc", MergeNone, "aarch64_sve_rev_b32", [IsOverloadNone, VerifyRuntimeMode]>; def SVREV_B64 : SInst<"svrev_b64", "PP", "Pc", MergeNone, "aarch64_sve_rev_b64", [IsOverloadNone, VerifyRuntimeMode]>; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index e91537186df59..34df9af7fc06d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -44,6 +44,7 @@ def CIR_Dialect : Dialect { static llvm::StringRef getModuleLevelAsmAttrName() { return "cir.module_asm"; } static llvm::StringRef getGlobalCtorsAttrName() { return "cir.global_ctors"; } static llvm::StringRef getGlobalDtorsAttrName() { return "cir.global_dtors"; } + static llvm::StringRef getOperandSegmentSizesAttrName() { return "operandSegmentSizes"; } void registerAttributes(); void registerTypes(); diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a19c4f951fff9..5f5fab6f12300 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2728,7 +2728,7 @@ def CIR_LLVMIntrinsicCallOp : CIR_Op<"call_llvm_intrinsic"> { } //===----------------------------------------------------------------------===// -// CallOp +// CallOp and TryCallOp //===----------------------------------------------------------------------===// def CIR_SideEffect : CIR_I32EnumAttr< @@ -2855,6 +2855,96 @@ def CIR_CallOp : CIR_CallOpBase<"call", [NoRegionArguments]> { ]; } +def CIR_TryCallOp : CIR_CallOpBase<"try_call",[ + Terminator +]> { + let summary = "try_call operation"; + let description = [{ + Similar to `cir.call` but requires two destination blocks, + one which is used if the call returns without throwing an + exception (the "normal" destination) and 
another which is used + if an exception is thrown (the "unwind" destination). + + This operation is used only after the CFG flattening pass. + + Example: + + ```mlir + // Before CFG flattening + cir.try { + %call = cir.call @division(%a, %b) : () -> !s32i + cir.yield + } catch all { + cir.yield + } + + // After CFG flattening + %call = cir.try_call @division(%a, %b) ^normalDest, ^unwindDest + : (f32, f32) -> f32 + ^normalDest: + cir.br ^afterTryBlock + ^unwindDest: + %exception_ptr, %type_id = cir.eh.inflight_exception + cir.br ^catchHandlerBlock(%exception_ptr : !cir.ptr) + ^catchHandlerBlock: + ... + ``` + }]; + + let arguments = commonArgs; + let results = (outs Optional:$result); + let successors = (successor + AnySuccessor:$normalDest, + AnySuccessor:$unwindDest + ); + + let skipDefaultBuilders = 1; + let hasLLVMLowering = false; + + let builders = [ + OpBuilder<(ins "mlir::SymbolRefAttr":$callee, + "mlir::Type":$resType, + "mlir::Block *":$normalDest, + "mlir::Block *":$unwindDest, + CArg<"mlir::ValueRange", "{}">:$callOperands, + CArg<"SideEffect", "SideEffect::All">:$sideEffect), [{ + $_state.addOperands(callOperands); + + if (callee) + $_state.addAttribute("callee", callee); + if (resType && !isa(resType)) + $_state.addTypes(resType); + + $_state.addAttribute("side_effect", + SideEffectAttr::get($_builder.getContext(), sideEffect)); + + // Handle branches + $_state.addSuccessors(normalDest); + $_state.addSuccessors(unwindDest); + }]>, + OpBuilder<(ins "mlir::Value":$ind_target, + "FuncType":$fn_type, + "mlir::Block *":$normalDest, + "mlir::Block *":$unwindDest, + CArg<"mlir::ValueRange", "{}">:$callOperands, + CArg<"SideEffect", "SideEffect::All">:$sideEffect), [{ + ::llvm::SmallVector finalCallOperands({ind_target}); + finalCallOperands.append(callOperands.begin(), callOperands.end()); + $_state.addOperands(finalCallOperands); + + if (!fn_type.hasVoidReturn()) + $_state.addTypes(fn_type.getReturnType()); + + $_state.addAttribute("side_effect", +
SideEffectAttr::get($_builder.getContext(), sideEffect)); + + // Handle branches + $_state.addSuccessors(normalDest); + $_state.addSuccessors(unwindDest); + }]> + ]; +} + //===----------------------------------------------------------------------===// // AwaitOp //===----------------------------------------------------------------------===// @@ -4612,6 +4702,16 @@ def CIR_ExpOp : CIR_UnaryFPToFPBuiltinOp<"exp", "ExpOp"> { }]; } +def CIR_Exp2Op : CIR_UnaryFPToFPBuiltinOp<"exp2", "Exp2Op"> { + let summary = "Computes the floating-point base-2 exponential value"; + let description = [{ + `cir.exp2` computes the base-2 exponential of a floating-point operand and + returns a result of the same type. + + Floating-point exceptions are ignored, and it does not set `errno`. + }]; +} + def CIR_FAbsOp : CIR_UnaryFPToFPBuiltinOp<"fabs", "FAbsOp"> { let summary = "Computes the floating-point absolute value"; let description = [{ diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 7321bf4ea8963..1427c677d0f34 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -151,7 +151,6 @@ struct MissingFeatures { // Coroutines static bool coroEndBuiltinCall() { return false; } - static bool coroutineFrame() { return false; } static bool emitBodyAndFallthrough() { return false; } static bool coroOutsideFrameMD() { return false; } diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h index ac17d6211d882..264bd4965f9ad 100644 --- a/clang/include/clang/Driver/CommonArgs.h +++ b/clang/include/clang/Driver/CommonArgs.h @@ -291,16 +291,6 @@ void handleVectorizeLoopsArgs(const llvm::opt::ArgList &Args, void handleVectorizeSLPArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs); -// Parse -mprefer-vector-width=. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. 
-StringRef parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args); - -// Parse -mrecip. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef parseMRecipOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args); - // Convert ComplexRangeKind to a string that can be passed as a frontend option. std::string complexRangeKindToStr(LangOptions::ComplexRangeKind Range); diff --git a/clang/include/clang/Driver/CreateASTUnitFromArgs.h b/clang/include/clang/Driver/CreateASTUnitFromArgs.h new file mode 100644 index 0000000000000..30575cc04ca7c --- /dev/null +++ b/clang/include/clang/Driver/CreateASTUnitFromArgs.h @@ -0,0 +1,80 @@ +//===- CreateASTUnitFromArgs.h - Create an ASTUnit from Args ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating an ASTUnit from a vector of command line arguments. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H +#define LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H + +#include "clang/Frontend/ASTUnit.h" + +namespace clang { + +/// Create an ASTUnit from a vector of command line arguments, which must +/// specify exactly one source file. +/// +/// \param ArgBegin - The beginning of the argument vector. +/// +/// \param ArgEnd - The end of the argument vector. +/// +/// \param PCHContainerOps - The PCHContainerOperations to use for loading and +/// creating modules. +/// +/// \param Diags - The diagnostics engine to use for reporting errors; its +/// lifetime is expected to extend past that of the returned ASTUnit.
+/// +/// \param ResourceFilesPath - The path to the compiler resource files. +/// +/// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, +/// PCH are stored in temporary files. +/// +/// \param PreambleStoragePath - The path to a directory, in which to create +/// temporary PCH files. If empty, the default system temporary directory is +/// used. This parameter is ignored if \p StorePreamblesInMemory is true. +/// +/// \param ModuleFormat - If provided, uses the specific module format. +/// +/// \param ErrAST - If non-null and parsing failed without any AST to return +/// (e.g. because the PCH could not be loaded), this accepts the ASTUnit +/// mainly to allow the caller to see the diagnostics. +/// +/// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. +/// Note that preamble is saved to a temporary directory on a RealFileSystem, +/// so in order for it to be loaded correctly, VFS should have access to +/// it (i.e., be an overlay over RealFileSystem). RealFileSystem will be used +/// if \p VFS is nullptr. +/// +// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we +// shouldn't need to specify them at construction time.
+std::unique_ptr CreateASTUnitFromCommandLine( + const char **ArgBegin, const char **ArgEnd, + std::shared_ptr PCHContainerOps, + std::shared_ptr DiagOpts, + IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, + bool StorePreamblesInMemory = false, + StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false, + CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None, + ArrayRef RemappedFiles = {}, + bool RemappedFilesKeepOriginalName = true, + unsigned PrecompilePreambleAfterNParses = 0, + TranslationUnitKind TUKind = TU_Complete, + bool CacheCodeCompletionResults = false, + bool IncludeBriefCommentsInCodeCompletion = false, + bool AllowPCHWithCompilerErrors = false, + SkipFunctionBodiesScope SkipFunctionBodies = SkipFunctionBodiesScope::None, + bool SingleFileParse = false, bool UserFilesAreVolatile = false, + bool ForSerialization = false, bool RetainExcludedConditionalBlocks = false, + std::optional ModuleFormat = std::nullopt, + std::unique_ptr *ErrAST = nullptr, + IntrusiveRefCntPtr VFS = nullptr); + +} // namespace clang + +#endif // LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H diff --git a/clang/include/clang/Driver/CreateInvocationFromArgs.h b/clang/include/clang/Driver/CreateInvocationFromArgs.h new file mode 100644 index 0000000000000..0e0f67373ce87 --- /dev/null +++ b/clang/include/clang/Driver/CreateInvocationFromArgs.h @@ -0,0 +1,76 @@ +//===--- CreateInvocationFromArgs.h - CompilerInvocation from Args --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating a CompilerInvocation from command-line arguments, for +// tools to use in preparation to parse a file. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H +#define LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H + +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LLVM.h" +#include "llvm/Support/VirtualFileSystem.h" +#include +#include +#include + +namespace clang { + +class CompilerInvocation; +class DiagnosticsEngine; + +/// Optional inputs to createInvocation. +struct CreateInvocationOptions { + /// Receives diagnostics encountered while parsing command-line flags. + /// If not provided, these are printed to stderr. + IntrusiveRefCntPtr Diags = nullptr; + /// Used e.g. to probe for system headers locations. + /// If not provided, the real filesystem is used. + /// FIXME: the driver does perform some non-virtualized IO. + IntrusiveRefCntPtr VFS = nullptr; + /// Whether to attempt to produce a non-null (possibly incorrect) invocation + /// if any errors were encountered. + /// By default, always return null on errors. + bool RecoverOnError = false; + /// Allow the driver to probe the filesystem for PCH files. + /// This is used to replace -include with -include-pch in the cc1 args. + /// FIXME: ProbePrecompiled=true is a poor, historical default. + /// It misbehaves if the PCH file is from GCC, has the wrong version, etc. + bool ProbePrecompiled = false; + /// If set, the target is populated with the cc1 args produced by the driver. + /// This may be populated even if createInvocation returns nullptr. + std::vector *CC1Args = nullptr; +}; + +/// Interpret clang arguments in preparation to parse a file. 
+/// +/// This simulates a number of steps Clang takes when its driver is invoked: +/// - choosing actions (e.g compile + link) to run +/// - probing the system for settings like standard library locations +/// - spawning a cc1 subprocess to compile code, with more explicit arguments +/// - in the cc1 process, assembling those arguments into a CompilerInvocation +/// which is used to configure the parser +/// +/// This simulation is lossy, e.g. in some situations one driver run would +/// result in multiple parses. (Multi-arch, CUDA, ...). +/// This function tries to select a reasonable invocation that tools should use. +/// +/// Args[0] should be the driver name, such as "clang" or "/usr/bin/g++". +/// Absolute path is preferred - this affects searching for system headers. +/// +/// May return nullptr if an invocation could not be determined. +/// See CreateInvocationOptions::RecoverOnError to try harder! +std::unique_ptr +createInvocation(ArrayRef Args, + CreateInvocationOptions Opts = {}); + +} // namespace clang + +#endif // LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h index 83bcb7cab550f..76a6c5a128efb 100644 --- a/clang/include/clang/Driver/Driver.h +++ b/clang/include/clang/Driver/Driver.h @@ -406,10 +406,6 @@ class Driver { SmallString<128> &CrashDiagDir); public: - /// Takes the path to a binary that's either in bin/ or lib/ and returns - /// the path to clang's resource directory. 
- static std::string GetResourcesPath(StringRef BinaryPath); - Driver(StringRef ClangExecutable, StringRef TargetTriple, DiagnosticsEngine &Diags, std::string Title = "clang LLVM compiler", IntrusiveRefCntPtr VFS = nullptr); diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h index b6f124f948b59..c7e57d47f9ed1 100644 --- a/clang/include/clang/Format/Format.h +++ b/clang/include/clang/Format/Format.h @@ -3275,9 +3275,20 @@ struct FormatStyle { /// Hex: -1 /// \endcode /// - /// You can also specify a minimum number of digits (``BinaryMinDigits``, - /// ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must - /// have in order for the separators to be inserted. + /// You can also specify a minimum number of digits + /// (``BinaryMinDigitsInsert``, ``DecimalMinDigitsInsert``, and + /// ``HexMinDigitsInsert``) the integer literal must have in order for the + /// separators to be inserted, and a maximum number of digits + /// (``BinaryMaxDigitsRemove``, ``DecimalMaxDigitsRemove``, and + /// ``HexMaxDigitsRemove``) until the separators are removed. This divides the + /// literals in 3 regions, always without separator (up until including + /// ``xxxMaxDigitsRemove``), maybe with, or without separators (up until + /// excluding ``xxxMinDigitsInsert``), and finally always with separators. + /// \note + /// ``BinaryMinDigits``, ``DecimalMinDigits``, and ``HexMinDigits`` are + /// deprecated and renamed to ``BinaryMinDigitsInsert``, + /// ``DecimalMinDigitsInsert``, and ``HexMinDigitsInsert``, respectively. + /// \endnote struct IntegerLiteralSeparatorStyle { /// Format separators in binary literals. /// \code{.text} @@ -3290,11 +3301,23 @@ struct FormatStyle { /// Format separators in binary literals with a minimum number of digits. 
/// \code{.text} /// // Binary: 3 - /// // BinaryMinDigits: 7 + /// // BinaryMinDigitsInsert: 7 /// b1 = 0b101101; /// b2 = 0b1'101'101; /// \endcode - int8_t BinaryMinDigits; + int8_t BinaryMinDigitsInsert; + /// Remove separators in binary literals with a maximum number of digits. + /// \code{.text} + /// // Binary: 3 + /// // BinaryMinDigitsInsert: 7 + /// // BinaryMaxDigitsRemove: 4 + /// b0 = 0b1011; // Always removed. + /// b1 = 0b101101; // Not added. + /// b2 = 0b1'01'101; // Not removed, not corrected. + /// b3 = 0b1'101'101; // Always added. + /// b4 = 0b10'1101; // Corrected to 0b101'101. + /// \endcode + int8_t BinaryMaxDigitsRemove; /// Format separators in decimal literals. /// \code{.text} /// /* -1: */ d = 18446744073709550592ull; @@ -3305,11 +3328,23 @@ struct FormatStyle { /// Format separators in decimal literals with a minimum number of digits. /// \code{.text} /// // Decimal: 3 - /// // DecimalMinDigits: 5 + /// // DecimalMinDigitsInsert: 5 /// d1 = 2023; /// d2 = 10'000; /// \endcode - int8_t DecimalMinDigits; + int8_t DecimalMinDigitsInsert; + /// Remove separators in decimal literals with a maximum number of digits. + /// \code{.text} + /// // Decimal: 3 + /// // DecimalMinDigitsInsert: 7 + /// // DecimalMaxDigitsRemove: 4 + /// d0 = 2023; // Always removed. + /// d1 = 123456; // Not added. + /// d2 = 1'23'456; // Not removed, not corrected. + /// d3 = 5'000'000; // Always added. + /// d4 = 1'23'45; // Corrected to 12'345. + /// \endcode + int8_t DecimalMaxDigitsRemove; /// Format separators in hexadecimal literals. /// \code{.text} /// /* -1: */ h = 0xDEADBEEFDEADBEEFuz; @@ -3321,15 +3356,36 @@ struct FormatStyle { /// digits. /// \code{.text} /// // Hex: 2 - /// // HexMinDigits: 6 + /// // HexMinDigitsInsert: 6 /// h1 = 0xABCDE; /// h2 = 0xAB'CD'EF; /// \endcode - int8_t HexMinDigits; + int8_t HexMinDigitsInsert; + /// Remove separators in hexadecimal literals with a maximum number of + /// digits. 
+ /// \code{.text} + /// // Hex: 2 + /// // HexMinDigitsInsert: 6 + /// // HexMaxDigitsRemove: 4 + /// h0 = 0xAFFE; // Always removed. + /// h1 = 0xABCDE; // Not added. + /// h2 = 0xABC'DE; // Not removed, not corrected. + /// h3 = 0xAB'CD'EF; // Always added. + /// h4 = 0xABCD'E; // Corrected to 0xA'BC'DE. + /// \endcode + int8_t HexMaxDigitsRemove; bool operator==(const IntegerLiteralSeparatorStyle &R) const { - return Binary == R.Binary && BinaryMinDigits == R.BinaryMinDigits && - Decimal == R.Decimal && DecimalMinDigits == R.DecimalMinDigits && - Hex == R.Hex && HexMinDigits == R.HexMinDigits; + return Binary == R.Binary && + BinaryMinDigitsInsert == R.BinaryMinDigitsInsert && + BinaryMaxDigitsRemove == R.BinaryMaxDigitsRemove && + Decimal == R.Decimal && + DecimalMinDigitsInsert == R.DecimalMinDigitsInsert && + DecimalMaxDigitsRemove == R.DecimalMaxDigitsRemove && + Hex == R.Hex && HexMinDigitsInsert == R.HexMinDigitsInsert && + HexMaxDigitsRemove == R.HexMaxDigitsRemove; + } + bool operator!=(const IntegerLiteralSeparatorStyle &R) const { + return !operator==(R); } }; diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h index e585933a5c8be..341460e1962cb 100644 --- a/clang/include/clang/Frontend/ASTUnit.h +++ b/clang/include/clang/Frontend/ASTUnit.h @@ -23,11 +23,13 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetOptions.h" #include "clang/Frontend/PrecompiledPreamble.h" +#include "clang/Frontend/StandaloneDiagnostic.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/ModuleLoader.h" #include "clang/Lex/PreprocessingRecord.h" #include "clang/Sema/CodeCompleteConsumer.h" #include "clang/Serialization/ASTBitCodes.h" +#include "clang/Serialization/ASTWriter.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" @@ -36,6 +38,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator_range.h" +#include 
"llvm/Bitstream/BitstreamWriter.h" #include #include #include @@ -88,25 +91,6 @@ enum class CaptureDiagsKind { None, All, AllWithoutNonErrorsFromIncludes }; /// Utility class for loading a ASTContext from an AST file. class ASTUnit { -public: - struct StandaloneFixIt { - std::pair RemoveRange; - std::pair InsertFromRange; - std::string CodeToInsert; - bool BeforePreviousInsertions; - }; - - struct StandaloneDiagnostic { - unsigned ID; - DiagnosticsEngine::Level Level; - std::string Message; - std::string Filename; - unsigned LocOffset; - std::vector> Ranges; - std::vector FixIts; - }; - -private: std::unique_ptr LangOpts; std::unique_ptr CodeGenOpts; // FIXME: The documentation on \c LoadFrom* member functions states that the @@ -129,7 +113,15 @@ class ASTUnit { bool HadModuleLoaderFatalFailure = false; bool StorePreamblesInMemory = false; - struct ASTWriterData; + /// Utility struct for managing ASTWriter and its associated data streams. + struct ASTWriterData { + SmallString<128> Buffer; + llvm::BitstreamWriter Stream; + ASTWriter Writer; + + ASTWriterData(ModuleCache &ModCache, const CodeGenOptions &CGOpts) + : Stream(Buffer), Writer(Stream, Buffer, ModCache, CGOpts, {}) {} + }; std::unique_ptr WriterData; FileSystemOptions FileSystemOpts; @@ -271,11 +263,6 @@ class ASTUnit { static void ConfigureDiags(IntrusiveRefCntPtr Diags, ASTUnit &AST, CaptureDiagsKind CaptureDiagnostics); - void - TranslateStoredDiagnostics(FileManager &FileMgr, SourceManager &SrcMan, - const SmallVectorImpl &Diags, - SmallVectorImpl &Out); - void clearFileLevelDecls(); public: @@ -834,65 +821,24 @@ class ASTUnit { bool IncludeBriefCommentsInCodeCompletion = false, bool UserFilesAreVolatile = false); - /// LoadFromCommandLine - Create an ASTUnit from a vector of command line - /// arguments, which must specify exactly one source file. - /// - /// \param ArgBegin - The beginning of the argument vector. - /// - /// \param ArgEnd - The end of the argument vector. 
- /// - /// \param PCHContainerOps - The PCHContainerOperations to use for loading and - /// creating modules. - /// - /// \param Diags - The diagnostics engine to use for reporting errors; its - /// lifetime is expected to extend past that of the returned ASTUnit. - /// - /// \param ResourceFilesPath - The path to the compiler resource files. - /// - /// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, - /// PCH are stored in temporary files. - /// - /// \param PreambleStoragePath - The path to a directory, in which to create - /// temporary PCH files. If empty, the default system temporary directory is - /// used. This parameter is ignored if \p StorePreamblesInMemory is true. - /// - /// \param ModuleFormat - If provided, uses the specific module format. - /// - /// \param ErrAST - If non-null and parsing failed without any AST to return - /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit - /// mainly to allow the caller to see the diagnostics. - /// - /// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. - /// Note that preamble is saved to a temporary directory on a RealFileSystem, - /// so in order for it to be loaded correctly, VFS should have access to - /// it(i.e., be an overlay over RealFileSystem). RealFileSystem will be used - /// if \p VFS is nullptr. - /// - // FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we - // shouldn't need to specify them at construction time. 
- static std::unique_ptr LoadFromCommandLine( + friend std::unique_ptr CreateASTUnitFromCommandLine( const char **ArgBegin, const char **ArgEnd, std::shared_ptr PCHContainerOps, std::shared_ptr DiagOpts, IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, - bool StorePreamblesInMemory = false, - StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false, - CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None, - ArrayRef RemappedFiles = {}, - bool RemappedFilesKeepOriginalName = true, - unsigned PrecompilePreambleAfterNParses = 0, - TranslationUnitKind TUKind = TU_Complete, - bool CacheCodeCompletionResults = false, - bool IncludeBriefCommentsInCodeCompletion = false, - bool AllowPCHWithCompilerErrors = false, - SkipFunctionBodiesScope SkipFunctionBodies = - SkipFunctionBodiesScope::None, - bool SingleFileParse = false, bool UserFilesAreVolatile = false, - bool ForSerialization = false, - bool RetainExcludedConditionalBlocks = false, - std::optional ModuleFormat = std::nullopt, - std::unique_ptr *ErrAST = nullptr, - IntrusiveRefCntPtr VFS = nullptr); + bool StorePreamblesInMemory, StringRef PreambleStoragePath, + bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, + ArrayRef RemappedFiles, + bool RemappedFilesKeepOriginalName, + unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind, + bool CacheCodeCompletionResults, + bool IncludeBriefCommentsInCodeCompletion, + bool AllowPCHWithCompilerErrors, + SkipFunctionBodiesScope SkipFunctionBodies, bool SingleFileParse, + bool UserFilesAreVolatile, bool ForSerialization, + bool RetainExcludedConditionalBlocks, + std::optional ModuleFormat, std::unique_ptr *ErrAST, + IntrusiveRefCntPtr VFS); /// Reparse the source files using the same command-line options that /// were originally used to produce this translation unit. @@ -963,6 +909,44 @@ class ASTUnit { bool serialize(raw_ostream &OS); }; +/// Diagnostic consumer that saves each diagnostic it is given. 
+class FilterAndStoreDiagnosticConsumer : public DiagnosticConsumer { + SmallVectorImpl *StoredDiags; + SmallVectorImpl *StandaloneDiags; + bool CaptureNonErrorsFromIncludes = true; + const LangOptions *LangOpts = nullptr; + SourceManager *SourceMgr = nullptr; + +public: + FilterAndStoreDiagnosticConsumer( + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags, + bool CaptureNonErrorsFromIncludes); + + void BeginSourceFile(const LangOptions &LangOpts, + const Preprocessor *PP = nullptr) override; + + void HandleDiagnostic(DiagnosticsEngine::Level Level, + const Diagnostic &Info) override; +}; + +/// RAII object that optionally captures and filters diagnostics, if +/// there is no diagnostic client to capture them already. +class CaptureDroppedDiagnostics { + DiagnosticsEngine &Diags; + FilterAndStoreDiagnosticConsumer Client; + DiagnosticConsumer *PreviousClient = nullptr; + std::unique_ptr OwningPreviousClient; + +public: + CaptureDroppedDiagnostics( + CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags); + + ~CaptureDroppedDiagnostics(); +}; + } // namespace clang #endif // LLVM_CLANG_FRONTEND_ASTUNIT_H diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h index b19a6e1a8acc3..4977ddb307d21 100644 --- a/clang/include/clang/Frontend/CompilerInvocation.h +++ b/clang/include/clang/Frontend/CompilerInvocation.h @@ -299,16 +299,6 @@ class CompilerInvocation : public CompilerInvocationBase { DiagnosticsEngine &Diags, const char *Argv0 = nullptr); - /// Get the directory where the compiler headers - /// reside, relative to the compiler binary (found by the passed in - /// arguments). - /// - /// \param Argv0 - The program path (from argv[0]), for finding the builtin - /// compiler path. - /// \param MainAddr - The address of main (or some other function in the main - /// executable), for finding the builtin compiler path. 
- static std::string GetResourcesPath(const char *Argv0, void *MainAddr); - /// Populate \p Opts with the default set of pointer authentication-related /// options given \p LangOpts and \p Triple. /// diff --git a/clang/include/clang/Frontend/StandaloneDiagnostic.h b/clang/include/clang/Frontend/StandaloneDiagnostic.h new file mode 100644 index 0000000000000..c23d5f95e0c2f --- /dev/null +++ b/clang/include/clang/Frontend/StandaloneDiagnostic.h @@ -0,0 +1,82 @@ +//===--- StandaloneDiagnostic.h - Serializable Diagnostic -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A serializable diagnostic representation to retain diagnostics after their +// SourceManager has been destroyed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_FRONTEND_STANDALONEDIAGNOSTICS_H +#define LLVM_CLANG_FRONTEND_STANDALONEDIAGNOSTICS_H + +#include "clang/Basic/DiagnosticIDs.h" +#include "clang/Basic/DiagnosticOptions.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/Specifiers.h" +#include "llvm/ADT/StringExtras.h" +#include +#include +#include + +namespace clang { + +/// Represents a StoredDiagnostic in a form that can be retained until after its +/// SourceManager has been destroyed. +/// +/// Source locations are stored as a combination of filename and offsets into +/// that file. +/// To report the diagnostic, it must first be translated back into a +/// StoredDiagnostic with a new associated SourceManager. +struct StandaloneDiagnostic { + /// Represents a CharSourceRange within a StandaloneDiagnostic. 
+ struct SourceOffsetRange { + SourceOffsetRange(CharSourceRange Range, const SourceManager &SrcMgr, + const LangOptions &LangOpts); + + unsigned Begin = 0; + unsigned End = 0; + }; + + /// Represents a FixItHint within a StandaloneDiagnostic. + struct StandaloneFixIt { + StandaloneFixIt(const SourceManager &SrcMgr, const LangOptions &LangOpts, + const FixItHint &FixIt); + + SourceOffsetRange RemoveRange; + SourceOffsetRange InsertFromRange; + std::string CodeToInsert; + bool BeforePreviousInsertions; + }; + + StandaloneDiagnostic(const LangOptions &LangOpts, + const StoredDiagnostic &InDiag); + + DiagnosticsEngine::Level Level; + SrcMgr::CharacteristicKind FileKind; + unsigned ID = 0; + unsigned FileOffset = 0; + std::string Message; + std::string Filename; + std::vector Ranges; + std::vector FixIts; +}; + +/// Translates \c StandaloneDiag into a StoredDiagnostic, associating it with +/// the provided FileManager and SourceManager. +/// +/// This allows the diagnostic to be emitted using the diagnostics engine, since +/// StandaloneDiagnostics themselfs cannot be emitted directly. +StoredDiagnostic +translateStandaloneDiag(FileManager &FileMgr, SourceManager &SrcMgr, + const StandaloneDiagnostic &StandaloneDiag, + llvm::StringMap &SrcLocCache); + +} // namespace clang + +#endif // STANDALONEDIAGNOSTICS diff --git a/clang/include/clang/Frontend/Utils.h b/clang/include/clang/Frontend/Utils.h index ed2703c76f18d..1c561b47b5c47 100644 --- a/clang/include/clang/Frontend/Utils.h +++ b/clang/include/clang/Frontend/Utils.h @@ -192,51 +192,6 @@ IntrusiveRefCntPtr createChainedIncludesSource(CompilerInstance &CI, IntrusiveRefCntPtr &OutReader); -/// Optional inputs to createInvocation. -struct CreateInvocationOptions { - /// Receives diagnostics encountered while parsing command-line flags. - /// If not provided, these are printed to stderr. - IntrusiveRefCntPtr Diags = nullptr; - /// Used e.g. to probe for system headers locations. 
- /// If not provided, the real filesystem is used. - /// FIXME: the driver does perform some non-virtualized IO. - IntrusiveRefCntPtr VFS = nullptr; - /// Whether to attempt to produce a non-null (possibly incorrect) invocation - /// if any errors were encountered. - /// By default, always return null on errors. - bool RecoverOnError = false; - /// Allow the driver to probe the filesystem for PCH files. - /// This is used to replace -include with -include-pch in the cc1 args. - /// FIXME: ProbePrecompiled=true is a poor, historical default. - /// It misbehaves if the PCH file is from GCC, has the wrong version, etc. - bool ProbePrecompiled = false; - /// If set, the target is populated with the cc1 args produced by the driver. - /// This may be populated even if createInvocation returns nullptr. - std::vector *CC1Args = nullptr; -}; - -/// Interpret clang arguments in preparation to parse a file. -/// -/// This simulates a number of steps Clang takes when its driver is invoked: -/// - choosing actions (e.g compile + link) to run -/// - probing the system for settings like standard library locations -/// - spawning a cc1 subprocess to compile code, with more explicit arguments -/// - in the cc1 process, assembling those arguments into a CompilerInvocation -/// which is used to configure the parser -/// -/// This simulation is lossy, e.g. in some situations one driver run would -/// result in multiple parses. (Multi-arch, CUDA, ...). -/// This function tries to select a reasonable invocation that tools should use. -/// -/// Args[0] should be the driver name, such as "clang" or "/usr/bin/g++". -/// Absolute path is preferred - this affects searching for system headers. -/// -/// May return nullptr if an invocation could not be determined. -/// See CreateInvocationOptions::ShouldRecoverOnErrors to try harder! 
-std::unique_ptr -createInvocation(ArrayRef Args, - CreateInvocationOptions Opts = {}); - } // namespace clang #endif // LLVM_CLANG_FRONTEND_UTILS_H diff --git a/clang/include/clang/Options/OptionUtils.h b/clang/include/clang/Options/OptionUtils.h index 83c48bd7d6843..02c9c27554db1 100644 --- a/clang/include/clang/Options/OptionUtils.h +++ b/clang/include/clang/Options/OptionUtils.h @@ -28,6 +28,7 @@ class ArgList; } // namespace llvm namespace clang { + /// Return the value of the last argument as an integer, or a default. If Diags /// is non-null, emits an error if the argument is given, but non-integral. int getLastArgIntValue(const llvm::opt::ArgList &Args, @@ -53,6 +54,29 @@ inline uint64_t getLastArgUInt64Value(const llvm::opt::ArgList &Args, return getLastArgUInt64Value(Args, Id, Default, &Diags, Base); } +// Parse -mprefer-vector-width=. Return the Value string if well-formed. +// Otherwise, return an empty string and issue a diagnosic message if needed. +StringRef parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args); + +// Parse -mrecip. Return the Value string if well-formed. +// Otherwise, return an empty string and issue a diagnosic message if needed. +StringRef parseMRecipOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args); + +/// Get the directory where the compiler headers reside, relative to the +/// compiler binary path \p BinaryPath. +std::string GetResourcesPath(StringRef BinaryPath); + +/// Get the directory where the compiler headers reside, relative to the +/// compiler binary path (found by the passed in arguments). +/// +/// \param Argv0 The program path (from argv[0]), for finding the builtin +/// compiler path. +/// \param MainAddr The address of main (or some other function in the main +/// executable), for finding the builtin compiler path. 
+std::string GetResourcesPath(const char *Argv0, void *MainAddr); + } // namespace clang #endif // LLVM_CLANG_OPTIONS_OPTIONUTILS_H diff --git a/clang/include/clang/Options/Options.td b/clang/include/clang/Options/Options.td index a8fc1c4326cc5..756d6deed7130 100644 --- a/clang/include/clang/Options/Options.td +++ b/clang/include/clang/Options/Options.td @@ -1427,6 +1427,16 @@ def fhip_emit_relocatable : Flag<["-"], "fhip-emit-relocatable">, HelpText<"Compile HIP source to relocatable">; def fno_hip_emit_relocatable : Flag<["-"], "fno-hip-emit-relocatable">, HelpText<"Do not override toolchain to compile HIP source to relocatable">; +def use_spirv_backend + : Flag<["-"], "use-spirv-backend">, + Group, + Flags<[HelpHidden]>, + HelpText<"Use the SPIRV backend for compilation ">; +def no_use_spirv_backend + : Flag<["-"], "no-use-spirv-backend">, + Group, + Flags<[HelpHidden]>, + HelpText<"Do not use the SPIRV backend for compilation ">; } // Clang specific/exclusive options for OpenACC. @@ -4832,6 +4842,14 @@ defm column_info : BoolOption<"g", "column-info", NegFlag, PosFlag, BothFlags<[], [ClangOption, CLOption, DXCOption]>>, Group; +defm call_site_info : BoolOption<"g", "call-site-info", + CodeGenOpts<"DebugCallSiteInfo">, + DefaultTrue, + PosFlag, + NegFlag, + BothFlags<[], [ClangOption, CC1Option], " call site debug info">>, + Group, + DocBrief<[{Call site debug info enables various debugger features including detecting tail calls for display in backtraces and displaying some source variable values that reference the call entry value.}]>; def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group, diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index ae500139ee6f7..78ecbccbe4efc 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -8721,10 +8721,6 @@ class Sema final : public SemaBase { 
ExprResult &RHS, SourceLocation QuestionLoc); - QualType CheckSizelessVectorConditionalTypes(ExprResult &Cond, - ExprResult &LHS, ExprResult &RHS, - SourceLocation QuestionLoc); - //// Determines if a type is trivially relocatable /// according to the C++26 rules. // FIXME: This is in Sema because it requires diff --git a/clang/lib/AST/ByteCode/BitcastBuffer.h b/clang/lib/AST/ByteCode/BitcastBuffer.h index d1d6ee39ad17b..8d32351883ae9 100644 --- a/clang/lib/AST/ByteCode/BitcastBuffer.h +++ b/clang/lib/AST/ByteCode/BitcastBuffer.h @@ -89,6 +89,12 @@ struct BitcastBuffer { Data = std::make_unique(ByteSize); } + /// Returns the byte at the given offset. + std::byte *atByte(unsigned Offset) { + assert(Offset < FinalBitSize.roundToBytes()); + return Data.get() + Offset; + } + /// Returns the buffer size in bits. Bits size() const { return FinalBitSize; } Bytes byteSize() const { return FinalBitSize.toBytes(); } @@ -113,6 +119,13 @@ struct BitcastBuffer { std::unique_ptr copyBits(Bits BitOffset, Bits BitWidth, Bits FullBitWidth, Endian TargetEndianness) const; + + /// Dereferences the value at the given offset. 
+ template T deref(Bytes Offset) const { + assert(Offset.getQuantity() < FinalBitSize.roundToBytes()); + assert((Offset.getQuantity() + sizeof(T)) <= FinalBitSize.roundToBytes()); + return *reinterpret_cast(Data.get() + Offset.getQuantity()); + } }; } // namespace interp diff --git a/clang/lib/AST/ByteCode/Integral.h b/clang/lib/AST/ByteCode/Integral.h index 131802439f0c5..e90f1a9a74e1c 100644 --- a/clang/lib/AST/ByteCode/Integral.h +++ b/clang/lib/AST/ByteCode/Integral.h @@ -202,30 +202,21 @@ template class Integral final { static Integral min(unsigned NumBits) { return Integral(Min); } static Integral max(unsigned NumBits) { return Integral(Max); } + static Integral zero(unsigned BitWidth = 0) { return from(0); } - template static Integral from(ValT Value) { - if constexpr (std::is_integral::value) + template + static Integral from(ValT Value, unsigned NumBits = 0) { + if constexpr (std::is_integral_v) return Integral(Value); else - return Integral::from(static_cast(Value)); + return Integral(static_cast(Value)); } template - static std::enable_if_t - from(Integral Value) { + static Integral from(Integral Value) { return Integral(Value.V); } - static Integral zero(unsigned BitWidth = 0) { return from(0); } - - template static Integral from(T Value, unsigned NumBits) { - return Integral(Value); - } - - static bool inRange(int64_t Value, unsigned NumBits) { - return CheckRange(Value); - } - static bool increment(Integral A, Integral *R) { return add(A, Integral(ReprT(1)), A.bitWidth(), R); } @@ -328,13 +319,6 @@ template class Integral final { return false; } } - template static bool CheckRange(int64_t V) { - if constexpr (std::is_signed_v) { - return Min <= V && V <= Max; - } else { - return V >= 0 && static_cast(V) <= Max; - } - } }; template diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 4222fd97a84fa..80ef656dc6285 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -1435,8 +1435,12 
@@ static bool getField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, return false; if (Ptr.isIntegralPointer()) { - S.Stk.push(Ptr.asIntPointer().atOffset(S.getASTContext(), Off)); - return true; + if (std::optional IntPtr = + Ptr.asIntPointer().atOffset(S.getASTContext(), Off)) { + S.Stk.push(std::move(*IntPtr)); + return true; + } + return false; } if (!Ptr.isBlockPointer()) { @@ -2081,15 +2085,15 @@ bool InvalidShuffleVectorIndex(InterpState &S, CodePtr OpPC, uint32_t Index) { bool CheckPointerToIntegralCast(InterpState &S, CodePtr OpPC, const Pointer &Ptr, unsigned BitWidth) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + if (Ptr.isDummy()) return false; if (Ptr.isFunctionPointer()) return true; - const SourceInfo &E = S.Current->getSource(OpPC); - S.CCEDiag(E, diag::note_constexpr_invalid_cast) - << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); - if (Ptr.isBlockPointer() && !Ptr.isZero()) { // Only allow based lvalue casts if they are lossless. 
if (S.getASTContext().getTargetInfo().getPointerWidth(LangAS::Default) != diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index 86b1ba88ca9d4..d8b8b209fa927 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -2646,10 +2646,6 @@ template ::T> bool CastPointerIntegral(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.pop(); - S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast) - << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret - << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); - if (!CheckPointerToIntegralCast(S, OpPC, Ptr, T::bitWidth())) return Invalid(S, OpPC); diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 83e40f64fd979..8496b58105c7a 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -1626,51 +1626,6 @@ static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, return true; } -/// Can be called with an integer or vector as the first and only parameter. -static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call, - unsigned BuiltinID) { - assert(Call->getNumArgs() == 1); - if (Call->getArg(0)->getType()->isIntegerType()) { - APSInt Val = popToAPSInt(S, Call->getArg(0)); - - if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) { - pushInteger(S, Val.popcount(), Call->getType()); - } else { - pushInteger(S, Val.reverseBits(), Call->getType()); - } - return true; - } - // Otherwise, the argument must be a vector. 
- assert(Call->getArg(0)->getType()->isVectorType()); - const Pointer &Arg = S.Stk.pop(); - assert(Arg.getFieldDesc()->isPrimitiveArray()); - const Pointer &Dst = S.Stk.peek(); - assert(Dst.getFieldDesc()->isPrimitiveArray()); - assert(Arg.getFieldDesc()->getNumElems() == - Dst.getFieldDesc()->getNumElems()); - - QualType ElemType = Arg.getFieldDesc()->getElemQualType(); - PrimType ElemT = *S.getContext().classify(ElemType); - unsigned NumElems = Arg.getNumElems(); - - // FIXME: Reading from uninitialized vector elements? - for (unsigned I = 0; I != NumElems; ++I) { - INT_TYPE_SWITCH_NO_BOOL(ElemT, { - if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) { - Dst.elem(I) = T::from(Arg.elem(I).toAPSInt().popcount()); - } else { - Dst.elem(I) = - T::from(Arg.elem(I).toAPSInt().reverseBits().getZExtValue()); - } - }); - } - Dst.initializeAllElements(); - - return true; -} - /// Can be called with an integer or vector as the first and only parameter. static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, @@ -1997,8 +1952,8 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, for (size_t I = 0; I != CmpSize; I += ElemSize) { if (IsWide) { INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), { - T A = *reinterpret_cast(BufferA.Data.get() + I); - T B = *reinterpret_cast(BufferB.Data.get() + I); + T A = *reinterpret_cast(BufferA.atByte(I)); + T B = *reinterpret_cast(BufferB.atByte(I)); if (A < B) { pushInteger(S, -1, Call->getType()); return true; @@ -2009,8 +1964,8 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, } }); } else { - std::byte A = BufferA.Data[I]; - std::byte B = BufferB.Data[I]; + std::byte A = BufferA.deref(Bytes(I)); + std::byte B = BufferB.deref(Bytes(I)); if (A < B) { pushInteger(S, -1, Call->getType()); @@ -2407,18 +2362,39 @@ static bool interp__builtin_elementwise_int_unaryop( InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref Fn) { 
assert(Call->getNumArgs() == 1); - assert(Call->getType()->isIntegerType()); // Single integer case. if (!Call->getArg(0)->getType()->isVectorType()) { + assert(Call->getType()->isIntegerType()); APSInt Src = popToAPSInt(S, Call->getArg(0)); APInt Result = Fn(Src); pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType()); return true; } - // TODO: Add vector integer handling. - return false; + // Vector case. + const Pointer &Arg = S.Stk.pop(); + assert(Arg.getFieldDesc()->isPrimitiveArray()); + const Pointer &Dst = S.Stk.peek(); + assert(Dst.getFieldDesc()->isPrimitiveArray()); + assert(Arg.getFieldDesc()->getNumElems() == + Dst.getFieldDesc()->getNumElems()); + + QualType ElemType = Arg.getFieldDesc()->getElemQualType(); + PrimType ElemT = *S.getContext().classify(ElemType); + unsigned NumElems = Arg.getNumElems(); + bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType(); + + for (unsigned I = 0; I != NumElems; ++I) { + INT_TYPE_SWITCH_NO_BOOL(ElemT, { + APSInt Src = Arg.elem(I).toAPSInt(); + APInt Result = Fn(Src); + Dst.elem(I) = static_cast(APSInt(std::move(Result), DestUnsigned)); + }); + } + Dst.initializeAllElements(); + + return true; } static bool interp__builtin_elementwise_int_binop( @@ -3468,6 +3444,69 @@ static bool interp__builtin_ia32_shuffle_generic( return true; } +static bool interp__builtin_ia32_shift_with_count( + InterpState &S, CodePtr OpPC, const CallExpr *Call, + llvm::function_ref ShiftOp, + llvm::function_ref OverflowOp) { + + assert(Call->getNumArgs() == 2); + + const Pointer &Count = S.Stk.pop(); + const Pointer &Source = S.Stk.pop(); + + QualType SourceType = Call->getArg(0)->getType(); + QualType CountType = Call->getArg(1)->getType(); + assert(SourceType->isVectorType() && CountType->isVectorType()); + + const auto *SourceVecT = SourceType->castAs(); + const auto *CountVecT = CountType->castAs(); + PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType()); + PrimType 
CountElemT = *S.getContext().classify(CountVecT->getElementType()); + + const Pointer &Dst = S.Stk.peek(); + + unsigned DestEltWidth = + S.getASTContext().getTypeSize(SourceVecT->getElementType()); + bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType(); + unsigned DestLen = SourceVecT->getNumElements(); + unsigned CountEltWidth = + S.getASTContext().getTypeSize(CountVecT->getElementType()); + unsigned NumBitsInQWord = 64; + unsigned NumCountElts = NumBitsInQWord / CountEltWidth; + + uint64_t CountLQWord = 0; + for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) { + uint64_t Elt = 0; + INT_TYPE_SWITCH(CountElemT, + { Elt = static_cast(Count.elem(EltIdx)); }); + CountLQWord |= (Elt << (EltIdx * CountEltWidth)); + } + + for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) { + APSInt Elt; + INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem(EltIdx).toAPSInt(); }); + + APInt Result; + if (CountLQWord < DestEltWidth) { + Result = ShiftOp(Elt, CountLQWord); + } else { + Result = OverflowOp(Elt, DestEltWidth); + } + if (IsDestUnsigned) { + INT_TYPE_SWITCH(SourceElemT, { + Dst.elem(EltIdx) = T::from(Result.getZExtValue()); + }); + } else { + INT_TYPE_SWITCH(SourceElemT, { + Dst.elem(EltIdx) = T::from(Result.getSExtValue()); + }); + } + } + + Dst.initializeAllElements(); + return true; +} + static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call) { @@ -3527,6 +3566,147 @@ static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, } pushInteger(S, RetMask, Call->getType()); + return true; +} + +static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, + const CallExpr *Call) { + // Arguments are: vector of floats, rounding immediate + assert(Call->getNumArgs() == 2); + + APSInt Imm = popToAPSInt(S, Call->getArg(1)); + const Pointer &Src = S.Stk.pop(); + const Pointer &Dst = S.Stk.peek(); + + assert(Src.getFieldDesc()->isPrimitiveArray()); + 
assert(Dst.getFieldDesc()->isPrimitiveArray()); + + const auto *SrcVTy = Call->getArg(0)->getType()->castAs(); + unsigned SrcNumElems = SrcVTy->getNumElements(); + const auto *DstVTy = Call->getType()->castAs(); + unsigned DstNumElems = DstVTy->getNumElements(); + + const llvm::fltSemantics &HalfSem = + S.getASTContext().getFloatTypeSemantics(S.getASTContext().HalfTy); + + // imm[2] == 1 means use MXCSR rounding mode. + // In that case, we can only evaluate if the conversion is exact. + int ImmVal = Imm.getZExtValue(); + bool UseMXCSR = (ImmVal & 4) != 0; + bool IsFPConstrained = + Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts()) + .isFPConstrained(); + + llvm::RoundingMode RM; + if (!UseMXCSR) { + switch (ImmVal & 3) { + case 0: + RM = llvm::RoundingMode::NearestTiesToEven; + break; + case 1: + RM = llvm::RoundingMode::TowardNegative; + break; + case 2: + RM = llvm::RoundingMode::TowardPositive; + break; + case 3: + RM = llvm::RoundingMode::TowardZero; + break; + default: + llvm_unreachable("Invalid immediate rounding mode"); + } + } else { + // For MXCSR, we must check for exactness. We can use any rounding mode + // for the trial conversion since the result is the same if it's exact. + RM = llvm::RoundingMode::NearestTiesToEven; + } + + QualType DstElemQT = Dst.getFieldDesc()->getElemQualType(); + PrimType DstElemT = *S.getContext().classify(DstElemQT); + + for (unsigned I = 0; I != SrcNumElems; ++I) { + Floating SrcVal = Src.elem(I); + APFloat DstVal = SrcVal.getAPFloat(); + + bool LostInfo; + APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo); + + if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) { + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_dynamic_rounding); + return false; + } + + INT_TYPE_SWITCH_NO_BOOL(DstElemT, { + // Convert the destination value's bit pattern to an unsigned integer, + // then reconstruct the element using the target type's 'from' method. 
+ uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue(); + Dst.elem(I) = T::from(RawBits); + }); + } + + // Zero out remaining elements if the destination has more elements + // (e.g., vcvtps2ph converting 4 floats to 8 shorts). + if (DstNumElems > SrcNumElems) { + for (unsigned I = SrcNumElems; I != DstNumElems; ++I) { + INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem(I) = T::from(0); }); + } + } + + Dst.initializeAllElements(); + return true; +} + +static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, + const CallExpr *Call) { + assert(Call->getNumArgs() == 2); + + QualType ATy = Call->getArg(0)->getType(); + QualType BTy = Call->getArg(1)->getType(); + if (!ATy->isVectorType() || !BTy->isVectorType()) { + return false; + } + + const Pointer &BPtr = S.Stk.pop(); + const Pointer &APtr = S.Stk.pop(); + const auto *AVecT = ATy->castAs(); + assert(AVecT->getNumElements() == + BTy->castAs()->getNumElements()); + + PrimType ElemT = *S.getContext().classify(AVecT->getElementType()); + + unsigned NumBytesInQWord = 8; + unsigned NumBitsInByte = 8; + unsigned NumBytes = AVecT->getNumElements(); + unsigned NumQWords = NumBytes / NumBytesInQWord; + const Pointer &Dst = S.Stk.peek(); + + for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) { + APInt BQWord(64, 0); + for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { + unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; + INT_TYPE_SWITCH(ElemT, { + uint64_t Byte = static_cast(BPtr.elem(Idx)); + BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte); + }); + } + + for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { + unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; + uint64_t Ctrl = 0; + INT_TYPE_SWITCH( + ElemT, { Ctrl = static_cast(APtr.elem(Idx)) & 0x3F; }); + + APInt Byte(8, 0); + for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) { + Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]); + } + INT_TYPE_SWITCH(ElemT, + { Dst.elem(Idx) = 
T::from(Byte.getZExtValue()); }); + } + } + + Dst.initializeAllElements(); return true; } @@ -4008,9 +4188,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID); case Builtin::BI__builtin_elementwise_popcount: + return interp__builtin_elementwise_int_unaryop( + S, OpPC, Call, [](const APSInt &Src) { + return APInt(Src.getBitWidth(), Src.popcount()); + }); case Builtin::BI__builtin_elementwise_bitreverse: - return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call, - BuiltinID); + return interp__builtin_elementwise_int_unaryop( + S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); }); case Builtin::BI__builtin_elementwise_abs: return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID); @@ -4756,6 +4940,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, return std::make_pair(0, static_cast(LaneOffset + Index)); }); + case X86::BI__builtin_ia32_vpmultishiftqb128: + case X86::BI__builtin_ia32_vpmultishiftqb256: + case X86::BI__builtin_ia32_vpmultishiftqb512: + return interp__builtin_ia32_multishiftqb(S, OpPC, Call); case X86::BI__builtin_ia32_kandqi: case X86::BI__builtin_ia32_kandhi: case X86::BI__builtin_ia32_kandsi: @@ -4826,6 +5014,48 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case X86::BI__builtin_ia32_phminposuw128: return interp__builtin_ia32_phminposuw(S, OpPC, Call); + case X86::BI__builtin_ia32_psraq128: + case X86::BI__builtin_ia32_psraq256: + case X86::BI__builtin_ia32_psraq512: + case X86::BI__builtin_ia32_psrad128: + case X86::BI__builtin_ia32_psrad256: + case X86::BI__builtin_ia32_psrad512: + case X86::BI__builtin_ia32_psraw128: + case X86::BI__builtin_ia32_psraw256: + case X86::BI__builtin_ia32_psraw512: + return interp__builtin_ia32_shift_with_count( + S, OpPC, Call, + [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); }, + [](const APInt &Elt, unsigned 
Width) { return Elt.ashr(Width - 1); }); + + case X86::BI__builtin_ia32_psllq128: + case X86::BI__builtin_ia32_psllq256: + case X86::BI__builtin_ia32_psllq512: + case X86::BI__builtin_ia32_pslld128: + case X86::BI__builtin_ia32_pslld256: + case X86::BI__builtin_ia32_pslld512: + case X86::BI__builtin_ia32_psllw128: + case X86::BI__builtin_ia32_psllw256: + case X86::BI__builtin_ia32_psllw512: + return interp__builtin_ia32_shift_with_count( + S, OpPC, Call, + [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); }, + [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); }); + + case X86::BI__builtin_ia32_psrlq128: + case X86::BI__builtin_ia32_psrlq256: + case X86::BI__builtin_ia32_psrlq512: + case X86::BI__builtin_ia32_psrld128: + case X86::BI__builtin_ia32_psrld256: + case X86::BI__builtin_ia32_psrld512: + case X86::BI__builtin_ia32_psrlw128: + case X86::BI__builtin_ia32_psrlw256: + case X86::BI__builtin_ia32_psrlw512: + return interp__builtin_ia32_shift_with_count( + S, OpPC, Call, + [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); }, + [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); }); + case X86::BI__builtin_ia32_pternlogd128_mask: case X86::BI__builtin_ia32_pternlogd256_mask: case X86::BI__builtin_ia32_pternlogd512_mask: @@ -4847,6 +5077,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, return interp__builtin_elementwise_triop(S, OpPC, Call, llvm::APIntOps::fshr); + case X86::BI__builtin_ia32_shuf_f32x4_256: + case X86::BI__builtin_ia32_shuf_i32x4_256: + case X86::BI__builtin_ia32_shuf_f64x2_256: + case X86::BI__builtin_ia32_shuf_i64x2_256: + case X86::BI__builtin_ia32_shuf_f32x4: + case X86::BI__builtin_ia32_shuf_i32x4: + case X86::BI__builtin_ia32_shuf_f64x2: + case X86::BI__builtin_ia32_shuf_i64x2: { + // Destination and sources A, B all have the same type. 
+ QualType VecQT = Call->getArg(0)->getType(); + const auto *VecT = VecQT->castAs(); + unsigned NumElems = VecT->getNumElements(); + unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType()); + unsigned LaneBits = 128u; + unsigned NumLanes = (NumElems * ElemBits) / LaneBits; + unsigned NumElemsPerLane = LaneBits / ElemBits; + + return interp__builtin_ia32_shuffle_generic( + S, OpPC, Call, + [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) { + // DstIdx determines source. ShuffleMask selects lane in source. + unsigned BitsPerElem = NumLanes / 2; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned Lane = DstIdx / NumElemsPerLane; + unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1; + unsigned BitIdx = BitsPerElem * Lane; + unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask; + unsigned ElemInLane = DstIdx % NumElemsPerLane; + unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane; + return std::pair{SrcIdx, IdxToPick}; + }); + } + case X86::BI__builtin_ia32_insertf32x4_256: case X86::BI__builtin_ia32_inserti32x4_256: case X86::BI__builtin_ia32_insertf64x2_256: @@ -4865,6 +5128,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case X86::BI__builtin_ia32_insert128i256: return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID); + case clang::X86::BI__builtin_ia32_vcvtps2ph: + case clang::X86::BI__builtin_ia32_vcvtps2ph256: + return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call); + case X86::BI__builtin_ia32_vec_ext_v4hi: case X86::BI__builtin_ia32_vec_ext_v16qi: case X86::BI__builtin_ia32_vec_ext_v8hi: diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp index 25719bd6f0f91..00e74db5655d6 100644 --- a/clang/lib/AST/ByteCode/Pointer.cpp +++ b/clang/lib/AST/ByteCode/Pointer.cpp @@ -895,8 +895,8 @@ std::optional Pointer::toRValue(const Context &Ctx, return Result; } -IntPointer IntPointer::atOffset(const ASTContext &ASTCtx, - unsigned Offset) const 
{ +std::optional IntPointer::atOffset(const ASTContext &ASTCtx, + unsigned Offset) const { if (!this->Desc) return *this; const Record *R = this->Desc->ElemRecord; @@ -914,6 +914,9 @@ IntPointer IntPointer::atOffset(const ASTContext &ASTCtx, return *this; const FieldDecl *FD = F->Decl; + if (FD->getParent()->isInvalidDecl()) + return std::nullopt; + const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(FD->getParent()); unsigned FieldIndex = FD->getFieldIndex(); uint64_t FieldOffset = diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h index 57c8e45609027..0978090ba8b19 100644 --- a/clang/lib/AST/ByteCode/Pointer.h +++ b/clang/lib/AST/ByteCode/Pointer.h @@ -47,7 +47,8 @@ struct IntPointer { const Descriptor *Desc; uint64_t Value; - IntPointer atOffset(const ASTContext &ASTCtx, unsigned Offset) const; + std::optional atOffset(const ASTContext &ASTCtx, + unsigned Offset) const; IntPointer baseCast(const ASTContext &ASTCtx, unsigned BaseOffset) const; }; diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp index c468303efea7e..d96934071cb60 100644 --- a/clang/lib/AST/ByteCode/Program.cpp +++ b/clang/lib/AST/ByteCode/Program.cpp @@ -27,7 +27,7 @@ unsigned Program::getOrCreateNativePointer(const void *Ptr) { return It->second; } -const void *Program::getNativePointer(unsigned Idx) { +const void *Program::getNativePointer(unsigned Idx) const { return NativePointers[Idx]; } diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h index cc9127dc77860..c8795504391fa 100644 --- a/clang/lib/AST/ByteCode/Program.h +++ b/clang/lib/AST/ByteCode/Program.h @@ -58,7 +58,7 @@ class Program final { unsigned getOrCreateNativePointer(const void *Ptr); /// Returns the value of a marshalled native pointer. - const void *getNativePointer(unsigned Idx); + const void *getNativePointer(unsigned Idx) const; /// Emits a string literal among global data. 
unsigned createGlobalString(const StringLiteral *S, diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp index 7a3e7ea4e5b8f..29f5916284ebb 100644 --- a/clang/lib/AST/CXXInheritance.cpp +++ b/clang/lib/AST/CXXInheritance.cpp @@ -34,9 +34,9 @@ using namespace clang; /// ambiguous, i.e., there are two or more paths that refer to /// different base class subobjects of the same type. BaseType must be /// an unqualified, canonical class type. -bool CXXBasePaths::isAmbiguous(CanQualType BaseType) { +bool CXXBasePaths::isAmbiguous(CanQualType BaseType) const { BaseType = BaseType.getUnqualifiedType(); - IsVirtBaseAndNumberNonVirtBases Subobjects = ClassSubobjects[BaseType]; + IsVirtBaseAndNumberNonVirtBases Subobjects = ClassSubobjects.lookup(BaseType); return Subobjects.NumberOfNonVirtBases + (Subobjects.IsVirtBase ? 1 : 0) > 1; } diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index 340bb4b2ed6a3..1f405920ce6b5 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -5213,6 +5213,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { case AO__scoped_atomic_fetch_min: case AO__scoped_atomic_fetch_max: case AO__scoped_atomic_exchange_n: + case AO__scoped_atomic_uinc_wrap: + case AO__scoped_atomic_udec_wrap: case AO__hip_atomic_exchange: case AO__hip_atomic_fetch_add: case AO__hip_atomic_fetch_sub: diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 3b91678f7d400..b986ee6ca4fa3 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -12166,6 +12166,52 @@ static bool evalShuffleGeneric( return true; } +static bool evalShiftWithCount( + EvalInfo &Info, const CallExpr *Call, APValue &Out, + llvm::function_ref ShiftOp, + llvm::function_ref OverflowOp) { + + APValue Source, Count; + if (!EvaluateAsRValue(Info, Call->getArg(0), Source) || + !EvaluateAsRValue(Info, Call->getArg(1), Count)) + return false; + + assert(Call->getNumArgs() == 2); + + QualType SourceTy = 
Call->getArg(0)->getType(); + assert(SourceTy->isVectorType() && + Call->getArg(1)->getType()->isVectorType()); + + QualType DestEltTy = SourceTy->castAs()->getElementType(); + unsigned DestEltWidth = Source.getVectorElt(0).getInt().getBitWidth(); + unsigned DestLen = Source.getVectorLength(); + bool IsDestUnsigned = DestEltTy->isUnsignedIntegerType(); + unsigned CountEltWidth = Count.getVectorElt(0).getInt().getBitWidth(); + unsigned NumBitsInQWord = 64; + unsigned NumCountElts = NumBitsInQWord / CountEltWidth; + SmallVector Result; + Result.reserve(DestLen); + + uint64_t CountLQWord = 0; + for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) { + uint64_t Elt = Count.getVectorElt(EltIdx).getInt().getZExtValue(); + CountLQWord |= (Elt << (EltIdx * CountEltWidth)); + } + + for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) { + APInt Elt = Source.getVectorElt(EltIdx).getInt(); + if (CountLQWord < DestEltWidth) { + Result.push_back( + APValue(APSInt(ShiftOp(Elt, CountLQWord), IsDestUnsigned))); + } else { + Result.push_back( + APValue(APSInt(OverflowOp(Elt, DestEltWidth), IsDestUnsigned))); + } + } + Out = APValue(Result.data(), Result.size()); + return true; +} + bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { if (!IsConstantEvaluatedBuiltinCall(E)) return ExprEvaluatorBaseTy::VisitCallExpr(E); @@ -13096,6 +13142,45 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(R, E); } + case X86::BI__builtin_ia32_vpmultishiftqb128: + case X86::BI__builtin_ia32_vpmultishiftqb256: + case X86::BI__builtin_ia32_vpmultishiftqb512: { + assert(E->getNumArgs() == 2); + + APValue A, B; + if (!Evaluate(A, Info, E->getArg(0)) || !Evaluate(B, Info, E->getArg(1))) + return false; + + assert(A.getVectorLength() == B.getVectorLength()); + unsigned NumBytesInQWord = 8; + unsigned NumBitsInByte = 8; + unsigned NumBytes = A.getVectorLength(); + unsigned NumQWords = NumBytes / NumBytesInQWord; + SmallVector Result; + Result.reserve(NumBytes); + 
+ for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) { + APInt BQWord(64, 0); + for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { + unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; + uint64_t Byte = B.getVectorElt(Idx).getInt().getZExtValue(); + BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte); + } + + for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { + unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; + uint64_t Ctrl = A.getVectorElt(Idx).getInt().getZExtValue() & 0x3F; + + APInt Byte(8, 0); + for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) { + Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]); + } + Result.push_back(APValue(APSInt(Byte, /*isUnsigned*/ true))); + } + } + return Success(APValue(Result.data(), Result.size()), E); + } + case X86::BI__builtin_ia32_phminposuw128: { APValue Source; if (!Evaluate(Source, Info, E->getArg(0))) @@ -13130,6 +13215,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(Result.data(), Result.size()), E); } + case X86::BI__builtin_ia32_psraq128: + case X86::BI__builtin_ia32_psraq256: + case X86::BI__builtin_ia32_psraq512: + case X86::BI__builtin_ia32_psrad128: + case X86::BI__builtin_ia32_psrad256: + case X86::BI__builtin_ia32_psrad512: + case X86::BI__builtin_ia32_psraw128: + case X86::BI__builtin_ia32_psraw256: + case X86::BI__builtin_ia32_psraw512: { + APValue R; + if (!evalShiftWithCount( + Info, E, R, + [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); }, + [](const APInt &Elt, unsigned Width) { + return Elt.ashr(Width - 1); + })) + return false; + return Success(R, E); + } + + case X86::BI__builtin_ia32_psllq128: + case X86::BI__builtin_ia32_psllq256: + case X86::BI__builtin_ia32_psllq512: + case X86::BI__builtin_ia32_pslld128: + case X86::BI__builtin_ia32_pslld256: + case X86::BI__builtin_ia32_pslld512: + case X86::BI__builtin_ia32_psllw128: + case X86::BI__builtin_ia32_psllw256: + case 
X86::BI__builtin_ia32_psllw512: { + APValue R; + if (!evalShiftWithCount( + Info, E, R, + [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); }, + [](const APInt &Elt, unsigned Width) { + return APInt::getZero(Width); + })) + return false; + return Success(R, E); + } + + case X86::BI__builtin_ia32_psrlq128: + case X86::BI__builtin_ia32_psrlq256: + case X86::BI__builtin_ia32_psrlq512: + case X86::BI__builtin_ia32_psrld128: + case X86::BI__builtin_ia32_psrld256: + case X86::BI__builtin_ia32_psrld512: + case X86::BI__builtin_ia32_psrlw128: + case X86::BI__builtin_ia32_psrlw256: + case X86::BI__builtin_ia32_psrlw512: { + APValue R; + if (!evalShiftWithCount( + Info, E, R, + [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); }, + [](const APInt &Elt, unsigned Width) { + return APInt::getZero(Width); + })) + return false; + return Success(R, E); + } + case X86::BI__builtin_ia32_pternlogd128_mask: case X86::BI__builtin_ia32_pternlogd256_mask: case X86::BI__builtin_ia32_pternlogd512_mask: @@ -13517,6 +13662,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(ResultElements.data(), ResultElements.size()), E); } + case X86::BI__builtin_ia32_shuf_f32x4_256: + case X86::BI__builtin_ia32_shuf_i32x4_256: + case X86::BI__builtin_ia32_shuf_f64x2_256: + case X86::BI__builtin_ia32_shuf_i64x2_256: + case X86::BI__builtin_ia32_shuf_f32x4: + case X86::BI__builtin_ia32_shuf_i32x4: + case X86::BI__builtin_ia32_shuf_f64x2: + case X86::BI__builtin_ia32_shuf_i64x2: { + APValue SourceA, SourceB; + if (!EvaluateAsRValue(Info, E->getArg(0), SourceA) || + !EvaluateAsRValue(Info, E->getArg(1), SourceB)) + return false; + + APSInt Imm; + if (!EvaluateInteger(E->getArg(2), Imm, Info)) + return false; + + // Destination and sources A, B all have the same type. 
+ unsigned NumElems = SourceA.getVectorLength(); + const VectorType *VT = E->getArg(0)->getType()->castAs(); + QualType ElemQT = VT->getElementType(); + unsigned ElemBits = Info.Ctx.getTypeSize(ElemQT); + unsigned LaneBits = 128u; + unsigned NumLanes = (NumElems * ElemBits) / LaneBits; + unsigned NumElemsPerLane = LaneBits / ElemBits; + + unsigned DstLen = SourceA.getVectorLength(); + SmallVector ResultElements; + ResultElements.reserve(DstLen); + + APValue R; + if (!evalShuffleGeneric( + Info, E, R, + [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) + -> std::pair { + // DstIdx determines source. ShuffleMask selects lane in source. + unsigned BitsPerElem = NumLanes / 2; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned Lane = DstIdx / NumElemsPerLane; + unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1; + unsigned BitIdx = BitsPerElem * Lane; + unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask; + unsigned ElemInLane = DstIdx % NumElemsPerLane; + unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane; + return {SrcIdx, IdxToPick}; + })) + return false; + return Success(R, E); + } + case X86::BI__builtin_ia32_insertf32x4_256: case X86::BI__builtin_ia32_inserti32x4_256: case X86::BI__builtin_ia32_insertf64x2_256: @@ -13820,6 +14015,81 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return false; return Success(R, E); } + + case clang::X86::BI__builtin_ia32_vcvtps2ph: + case clang::X86::BI__builtin_ia32_vcvtps2ph256: { + APValue SrcVec; + if (!EvaluateAsRValue(Info, E->getArg(0), SrcVec)) + return false; + + APSInt Imm; + if (!EvaluateInteger(E->getArg(1), Imm, Info)) + return false; + + const auto *SrcVTy = E->getArg(0)->getType()->castAs(); + unsigned SrcNumElems = SrcVTy->getNumElements(); + const auto *DstVTy = E->getType()->castAs(); + unsigned DstNumElems = DstVTy->getNumElements(); + QualType DstElemTy = DstVTy->getElementType(); + + const llvm::fltSemantics &HalfSem = + 
Info.Ctx.getFloatTypeSemantics(Info.Ctx.HalfTy); + + int ImmVal = Imm.getZExtValue(); + bool UseMXCSR = (ImmVal & 4) != 0; + bool IsFPConstrained = + E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).isFPConstrained(); + + llvm::RoundingMode RM; + if (!UseMXCSR) { + switch (ImmVal & 3) { + case 0: + RM = llvm::RoundingMode::NearestTiesToEven; + break; + case 1: + RM = llvm::RoundingMode::TowardNegative; + break; + case 2: + RM = llvm::RoundingMode::TowardPositive; + break; + case 3: + RM = llvm::RoundingMode::TowardZero; + break; + default: + llvm_unreachable("Invalid immediate rounding mode"); + } + } else { + RM = llvm::RoundingMode::NearestTiesToEven; + } + + SmallVector ResultElements; + ResultElements.reserve(DstNumElems); + + for (unsigned I = 0; I < SrcNumElems; ++I) { + APFloat SrcVal = SrcVec.getVectorElt(I).getFloat(); + + bool LostInfo; + APFloat::opStatus St = SrcVal.convert(HalfSem, RM, &LostInfo); + + if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) { + Info.FFDiag(E, diag::note_constexpr_dynamic_rounding); + return false; + } + + APSInt DstInt(SrcVal.bitcastToAPInt(), + DstElemTy->isUnsignedIntegerOrEnumerationType()); + ResultElements.push_back(APValue(DstInt)); + } + + if (DstNumElems > SrcNumElems) { + APSInt Zero = Info.Ctx.MakeIntValue(0, DstElemTy); + for (unsigned I = SrcNumElems; I < DstNumElems; ++I) { + ResultElements.push_back(APValue(Zero)); + } + } + + return Success(ResultElements, E); + } } } @@ -16708,7 +16978,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, bool IsUnsigned = (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask && - BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask); + BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpw512_mask); APValue LHS, RHS; APSInt Mask, Opcode; diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 41aebdb8d2f1b..7bc0404db1bee 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -850,7 +850,10 @@ 
void TextNodeDumper::Visit(const APValue &Value, QualType Ty) { return; } case APValue::AddrLabelDiff: - OS << "AddrLabelDiff "; + OS << "AddrLabelDiff "; + OS << "&&" << Value.getAddrLabelDiffLHS()->getLabel()->getName(); + OS << " - "; + OS << "&&" << Value.getAddrLabelDiffRHS()->getLabel()->getName(); return; } llvm_unreachable("Unknown APValue kind!"); diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp index 06f12784aa82d..05748359b7cef 100644 --- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp +++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp @@ -769,8 +769,29 @@ class TransferVisitor : public ConstStmtVisitor { StorageLocation *TrueLoc = TrueEnv->getStorageLocation(*S->getTrueExpr()); StorageLocation *FalseLoc = FalseEnv->getStorageLocation(*S->getFalseExpr()); - if (TrueLoc == FalseLoc && TrueLoc != nullptr) + if (TrueLoc == FalseLoc && TrueLoc != nullptr) { Env.setStorageLocation(*S, *TrueLoc); + } else if (!S->getType()->isRecordType()) { + // Ideally, we would have something like an "alias set" to say that the + // result StorageLocation can be either of the locations from the + // TrueEnv or FalseEnv. Then, when this ConditionalOperator is + // (a) used in an LValueToRValue cast, the value is the join of all of + // the values in the alias set. + // (b) or, used in an assignment to the resulting LValue, the assignment + // *may* update all of the locations in the alias set. + // For now, we do the simpler thing of creating a new StorageLocation + // and joining the values right away, handling only case (a). + // Otherwise, the dataflow framework needs to be updated be able to + // represent alias sets and weak updates (for the "may"). 
+ if (Value *Val = Environment::joinValues( + S->getType(), TrueEnv->getValue(*S->getTrueExpr()), *TrueEnv, + FalseEnv->getValue(*S->getFalseExpr()), *FalseEnv, Env, + Model)) { + StorageLocation &Loc = Env.createStorageLocation(*S); + Env.setStorageLocation(*S, Loc); + Env.setValue(Loc, *Val); + } + } } else if (!S->getType()->isRecordType()) { // The conditional operator can evaluate to either of the values of the // two branches. To model this, join these two values together to yield diff --git a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp index f7be472ed15b5..00870c3fd4086 100644 --- a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp +++ b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp @@ -15,18 +15,6 @@ namespace clang::lifetimes::internal { using llvm::isa_and_present; -static bool isGslPointerType(QualType QT) { - if (const auto *RD = QT->getAsCXXRecordDecl()) { - // We need to check the template definition for specializations. 
- if (auto *CTSD = dyn_cast(RD)) - return CTSD->getSpecializedTemplate() - ->getTemplatedDecl() - ->hasAttr(); - return RD->hasAttr(); - } - return false; -} - static bool isPointerType(QualType QT) { return QT->isPointerOrReferenceType() || isGslPointerType(QT); } diff --git a/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp b/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp index ad61a42c0eaeb..54e343fc2ee5e 100644 --- a/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp +++ b/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp @@ -10,6 +10,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" @@ -70,4 +71,34 @@ bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) { return isNormalAssignmentOperator(FD); } +template static bool isRecordWithAttr(QualType Type) { + auto *RD = Type->getAsCXXRecordDecl(); + if (!RD) + return false; + // Generally, if a primary template class declaration is annotated with an + // attribute, all its specializations generated from template instantiations + // should inherit the attribute. + // + // However, since lifetime analysis occurs during parsing, we may encounter + // cases where a full definition of the specialization is not required. In + // such cases, the specialization declaration remains incomplete and lacks the + // attribute. Therefore, we fall back to checking the primary template class. + // + // Note: it is possible for a specialization declaration to have an attribute + // even if the primary template does not. + // + // FIXME: What if the primary template and explicit specialization + // declarations have conflicting attributes? We should consider diagnosing + // this scenario. 
+ bool Result = RD->hasAttr(); + + if (auto *CTSD = dyn_cast(RD)) + Result |= CTSD->getSpecializedTemplate()->getTemplatedDecl()->hasAttr(); + + return Result; +} + +bool isGslPointerType(QualType QT) { return isRecordWithAttr(QT); } +bool isGslOwnerType(QualType QT) { return isRecordWithAttr(QT); } + } // namespace clang::lifetimes diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp index 77750cf89d7a7..a25bd6007d5ed 100644 --- a/clang/lib/Analysis/ThreadSafety.cpp +++ b/clang/lib/Analysis/ThreadSafety.cpp @@ -2820,7 +2820,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { case CFGElement::AutomaticObjectDtor: { CFGAutomaticObjDtor AD = BI.castAs(); const auto *DD = AD.getDestructorDecl(AC.getASTContext()); - if (!DD->hasAttrs()) + if (!DD || !DD->hasAttrs()) break; LocksetBuilder.handleCall( diff --git a/clang/lib/Basic/Targets/Sparc.cpp b/clang/lib/Basic/Targets/Sparc.cpp index d47eecb3cf058..fe1aad6804aa6 100644 --- a/clang/lib/Basic/Targets/Sparc.cpp +++ b/clang/lib/Basic/Targets/Sparc.cpp @@ -165,6 +165,7 @@ void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } + Builder.defineMacro("__LONG_DOUBLE_128__"); } void SparcV9TargetInfo::getTargetDefines(const LangOptions &Opts, diff --git a/clang/lib/Basic/Targets/Sparc.h b/clang/lib/Basic/Targets/Sparc.h index 3215e648ba6c3..acc27194c38ea 100644 --- a/clang/lib/Basic/Targets/Sparc.h +++ b/clang/lib/Basic/Targets/Sparc.h @@ -166,6 +166,13 @@ class LLVM_LIBRARY_VISIBILITY SparcV8TargetInfo : public SparcTargetInfo { PtrDiffType = SignedLong; break; } + + // The SPARCv8 System V ABI has long double 128-bits in size, but 64-bit + // aligned. 
+ LongDoubleWidth = 128; + LongDoubleAlign = 64; + LongDoubleFormat = &llvm::APFloat::IEEEquad(); + // Up to 32 bits (V8) or 64 bits (V9) are lock-free atomic, but we're // willing to do atomic ops on up to 64 bits. MaxAtomicPromoteWidth = 64; diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp index 7a90c89dd7dc0..f00d435937b92 100644 --- a/clang/lib/Basic/Targets/X86.cpp +++ b/clang/lib/Basic/Targets/X86.cpp @@ -1302,15 +1302,15 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const { // X86TargetInfo::hasFeature for a somewhat comprehensive list). bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const { return llvm::StringSwitch(FeatureStr) -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) .Case(STR, true) -#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY) .Case(STR, true) +#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) .Case(STR, true) +#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY, ABI_VALUE) .Case(STR, true) #include "llvm/TargetParser/X86TargetParser.def" .Default(false); } static llvm::X86::ProcessorFeatures getFeature(StringRef Name) { return llvm::StringSwitch(Name) -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \ +#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) \ .Case(STR, llvm::X86::FEATURE_##ENUM) #include "llvm/TargetParser/X86TargetParser.def" diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 48c082d89de18..4c94db5ddd457 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -644,6 +644,9 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, case AtomicExpr::AO__scoped_atomic_nand_fetch: case AtomicExpr::AO__scoped_atomic_fetch_nand: + + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + case AtomicExpr::AO__scoped_atomic_udec_wrap: cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI"); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index d220fdf4dc8a7..7d4d13121d5e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -300,6 +300,17 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID, assert(!cir::MissingFeatures::fastMathFlags()); return emitUnaryMaybeConstrainedFPBuiltin(*this, *e); + case Builtin::BIexp2: + case Builtin::BIexp2f: + case Builtin::BIexp2l: + case Builtin::BI__builtin_exp2: + case Builtin::BI__builtin_exp2f: + case Builtin::BI__builtin_exp2f16: + case Builtin::BI__builtin_exp2l: + case Builtin::BI__builtin_exp2f128: + assert(!cir::MissingFeatures::fastMathFlags()); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *e); + case Builtin::BIfabs: case Builtin::BIfabsf: case Builtin::BIfabsl: @@ -502,9 +513,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID, return getUndefRValue(e->getType()); case Builtin::BI__builtin_coro_frame: { - cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_frame NYI"); - assert(!cir::MissingFeatures::coroutineFrame()); - return getUndefRValue(e->getType()); + return emitCoroutineFrame(); } case Builtin::BI__builtin_coro_free: case Builtin::BI__builtin_coro_size: { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index e7aa8a234efd9..b242efc00e491 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -21,13 +21,11 @@ using namespace clang; using namespace clang::CIRGen; template -static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, const CallExpr *e, - const std::string &str, +static mlir::Value emitIntrinsicCallOp(CIRGenBuilderTy &builder, + mlir::Location loc, const StringRef str, const mlir::Type &resTy, Operands &&...op) { - CIRGenBuilderTy &builder = cgf.getBuilder(); - mlir::Location location = cgf.getLoc(e->getExprLoc()); - return cir::LLVMIntrinsicCallOp::create(builder, 
location, + return cir::LLVMIntrinsicCallOp::create(builder, loc, builder.getStringAttr(str), resTy, std::forward(op)...) .getResult(); @@ -68,10 +66,8 @@ static mlir::Value emitVectorFCmp(CIRGenBuilderTy &builder, return bitCast; } -static mlir::Value getMaskVecValue(CIRGenFunction &cgf, const CallExpr *expr, +static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Location loc, mlir::Value mask, unsigned numElems) { - - CIRGenBuilderTy &builder = cgf.getBuilder(); auto maskTy = cir::VectorType::get( builder.getUIntNTy(1), cast(mask.getType()).getWidth()); mlir::Value maskVec = builder.createBitcast(mask, maskTy); @@ -84,12 +80,41 @@ static mlir::Value getMaskVecValue(CIRGenFunction &cgf, const CallExpr *expr, for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, i)); - maskVec = builder.createVecShuffle(cgf.getLoc(expr->getExprLoc()), maskVec, - maskVec, indices); + maskVec = builder.createVecShuffle(loc, maskVec, maskVec, indices); } return maskVec; } +static mlir::Value emitX86MaskAddLogic(CIRGenBuilderTy &builder, + mlir::Location loc, + const std::string &intrinsicName, + SmallVectorImpl &ops) { + + auto intTy = cast(ops[0].getType()); + unsigned numElts = intTy.getWidth(); + mlir::Value lhsVec = getMaskVecValue(builder, loc, ops[0], numElts); + mlir::Value rhsVec = getMaskVecValue(builder, loc, ops[1], numElts); + mlir::Type vecTy = lhsVec.getType(); + mlir::Value resVec = emitIntrinsicCallOp(builder, loc, intrinsicName, vecTy, + mlir::ValueRange{lhsVec, rhsVec}); + return builder.createBitcast(resVec, ops[0].getType()); +} + +static mlir::Value emitX86MaskLogic(CIRGenBuilderTy &builder, + mlir::Location loc, + cir::BinOpKind binOpKind, + SmallVectorImpl &ops, + bool invertLHS = false) { + unsigned numElts = cast(ops[0].getType()).getWidth(); + mlir::Value lhs = getMaskVecValue(builder, loc, ops[0], numElts); + mlir::Value rhs = getMaskVecValue(builder, loc, ops[1], numElts); + + if (invertLHS) + lhs = 
builder.createNot(lhs); + return builder.createBitcast(builder.createBinop(loc, lhs, binOpKind, rhs), + ops[0].getType()); +} + mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr) { if (builtinID == Builtin::BI__builtin_cpu_is) { @@ -132,15 +157,20 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, default: return {}; case X86::BI_mm_clflush: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.clflush", voidTy, ops[0]); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.clflush", voidTy, ops[0]); case X86::BI_mm_lfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.lfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.lfence", voidTy); case X86::BI_mm_pause: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.pause", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.pause", voidTy); case X86::BI_mm_mfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.mfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.mfence", voidTy); case X86::BI_mm_sfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse.sfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse.sfence", voidTy); case X86::BI_mm_prefetch: case X86::BI__rdtsc: case X86::BI__builtin_ia32_rdtscp: { @@ -152,15 +182,17 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_lzcnt_u16: case X86::BI__builtin_ia32_lzcnt_u32: case X86::BI__builtin_ia32_lzcnt_u64: { - mlir::Value isZeroPoison = builder.getFalse(getLoc(expr->getExprLoc())); - return emitIntrinsicCallOp(*this, expr, "ctlz", ops[0].getType(), + mlir::Location loc = getLoc(expr->getExprLoc()); + mlir::Value isZeroPoison = builder.getFalse(loc); + return emitIntrinsicCallOp(builder, loc, "ctlz", ops[0].getType(), mlir::ValueRange{ops[0], isZeroPoison}); } case 
X86::BI__builtin_ia32_tzcnt_u16: case X86::BI__builtin_ia32_tzcnt_u32: case X86::BI__builtin_ia32_tzcnt_u64: { - mlir::Value isZeroPoison = builder.getFalse(getLoc(expr->getExprLoc())); - return emitIntrinsicCallOp(*this, expr, "cttz", ops[0].getType(), + mlir::Location loc = getLoc(expr->getExprLoc()); + mlir::Value isZeroPoison = builder.getFalse(loc); + return emitIntrinsicCallOp(builder, loc, "cttz", ops[0].getType(), mlir::ValueRange{ops[0], isZeroPoison}); } case X86::BI__builtin_ia32_undef128: @@ -216,14 +248,14 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, mlir::Location loc = getLoc(expr->getExprLoc()); Address tmp = createMemTemp(expr->getArg(0)->getType(), loc); builder.createStore(loc, ops[0], tmp); - return emitIntrinsicCallOp(*this, expr, "x86.sse.ldmxcsr", + return emitIntrinsicCallOp(builder, loc, "x86.sse.ldmxcsr", builder.getVoidTy(), tmp.getPointer()); } case X86::BI_mm_getcsr: case X86::BI__builtin_ia32_stmxcsr: { mlir::Location loc = getLoc(expr->getExprLoc()); Address tmp = createMemTemp(expr->getType(), loc); - emitIntrinsicCallOp(*this, expr, "x86.sse.stmxcsr", builder.getVoidTy(), + emitIntrinsicCallOp(builder, loc, "x86.sse.stmxcsr", builder.getVoidTy(), tmp.getPointer()); return builder.createLoad(loc, tmp); } @@ -605,50 +637,48 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_kshiftlihi: case X86::BI__builtin_ia32_kshiftlisi: case X86::BI__builtin_ia32_kshiftlidi: { + mlir::Location loc = getLoc(expr->getExprLoc()); unsigned shiftVal = ops[1].getDefiningOp().getIntValue().getZExtValue() & 0xff; unsigned numElems = cast(ops[0].getType()).getWidth(); if (shiftVal >= numElems) - return builder.getNullValue(ops[0].getType(), getLoc(expr->getExprLoc())); + return builder.getNullValue(ops[0].getType(), loc); - mlir::Value in = getMaskVecValue(*this, expr, ops[0], numElems); + mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems); SmallVector indices; mlir::Type 
i32Ty = builder.getSInt32Ty(); for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, numElems + i - shiftVal)); - mlir::Value zero = - builder.getNullValue(in.getType(), getLoc(expr->getExprLoc())); - mlir::Value sv = - builder.createVecShuffle(getLoc(expr->getExprLoc()), zero, in, indices); + mlir::Value zero = builder.getNullValue(in.getType(), loc); + mlir::Value sv = builder.createVecShuffle(loc, zero, in, indices); return builder.createBitcast(sv, ops[0].getType()); } case X86::BI__builtin_ia32_kshiftriqi: case X86::BI__builtin_ia32_kshiftrihi: case X86::BI__builtin_ia32_kshiftrisi: case X86::BI__builtin_ia32_kshiftridi: { + mlir::Location loc = getLoc(expr->getExprLoc()); unsigned shiftVal = ops[1].getDefiningOp().getIntValue().getZExtValue() & 0xff; unsigned numElems = cast(ops[0].getType()).getWidth(); if (shiftVal >= numElems) - return builder.getNullValue(ops[0].getType(), getLoc(expr->getExprLoc())); + return builder.getNullValue(ops[0].getType(), loc); - mlir::Value in = getMaskVecValue(*this, expr, ops[0], numElems); + mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems); SmallVector indices; mlir::Type i32Ty = builder.getSInt32Ty(); for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, i + shiftVal)); - mlir::Value zero = - builder.getNullValue(in.getType(), getLoc(expr->getExprLoc())); - mlir::Value sv = - builder.createVecShuffle(getLoc(expr->getExprLoc()), in, zero, indices); + mlir::Value zero = builder.getNullValue(in.getType(), loc); + mlir::Value sv = builder.createVecShuffle(loc, in, zero, indices); return builder.createBitcast(sv, ops[0].getType()); } case X86::BI__builtin_ia32_vprotbi: @@ -743,54 +773,82 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_ktestzsi: case X86::BI__builtin_ia32_ktestcdi: case X86::BI__builtin_ia32_ktestzdi: + cgm.errorNYI(expr->getSourceRange(), + std::string("unimplemented X86 builtin call: ") + + 
getContext().BuiltinInfo.getName(builtinID)); + return {}; case X86::BI__builtin_ia32_kaddqi: + return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.b", ops); case X86::BI__builtin_ia32_kaddhi: + return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.w", ops); case X86::BI__builtin_ia32_kaddsi: + return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.d", ops); case X86::BI__builtin_ia32_kadddi: + return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.q", ops); case X86::BI__builtin_ia32_kandqi: case X86::BI__builtin_ia32_kandhi: case X86::BI__builtin_ia32_kandsi: case X86::BI__builtin_ia32_kanddi: + return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()), + cir::BinOpKind::And, ops); case X86::BI__builtin_ia32_kandnqi: case X86::BI__builtin_ia32_kandnhi: case X86::BI__builtin_ia32_kandnsi: case X86::BI__builtin_ia32_kandndi: + return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()), + cir::BinOpKind::And, ops, true); case X86::BI__builtin_ia32_korqi: case X86::BI__builtin_ia32_korhi: case X86::BI__builtin_ia32_korsi: case X86::BI__builtin_ia32_kordi: + return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()), + cir::BinOpKind::Or, ops); case X86::BI__builtin_ia32_kxnorqi: case X86::BI__builtin_ia32_kxnorhi: case X86::BI__builtin_ia32_kxnorsi: case X86::BI__builtin_ia32_kxnordi: + return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()), + cir::BinOpKind::Xor, ops, true); case X86::BI__builtin_ia32_kxorqi: case X86::BI__builtin_ia32_kxorhi: case X86::BI__builtin_ia32_kxorsi: case X86::BI__builtin_ia32_kxordi: + return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()), + cir::BinOpKind::Xor, ops); case X86::BI__builtin_ia32_knotqi: case X86::BI__builtin_ia32_knothi: case X86::BI__builtin_ia32_knotsi: - case X86::BI__builtin_ia32_knotdi: + case X86::BI__builtin_ia32_knotdi: { + cir::IntType intTy = cast(ops[0].getType()); + unsigned 
numElts = intTy.getWidth(); + mlir::Value resVec = + getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts); + return builder.createBitcast(builder.createNot(resVec), ops[0].getType()); + } case X86::BI__builtin_ia32_kmovb: case X86::BI__builtin_ia32_kmovw: case X86::BI__builtin_ia32_kmovd: - case X86::BI__builtin_ia32_kmovq: + case X86::BI__builtin_ia32_kmovq: { + // Bitcast to vXi1 type and then back to integer. This gets the mask + // register type into the IR, but might be optimized out depending on + // what's around it. + cir::IntType intTy = cast(ops[0].getType()); + unsigned numElts = intTy.getWidth(); + mlir::Value resVec = + getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts); + return builder.createBitcast(resVec, ops[0].getType()); + } case X86::BI__builtin_ia32_kunpckdi: case X86::BI__builtin_ia32_kunpcksi: case X86::BI__builtin_ia32_kunpckhi: case X86::BI__builtin_ia32_sqrtsh_round_mask: case X86::BI__builtin_ia32_sqrtsd_round_mask: case X86::BI__builtin_ia32_sqrtss_round_mask: - case X86::BI__builtin_ia32_sqrtpd256: - case X86::BI__builtin_ia32_sqrtpd: - case X86::BI__builtin_ia32_sqrtps256: - case X86::BI__builtin_ia32_sqrtps: - case X86::BI__builtin_ia32_sqrtph256: - case X86::BI__builtin_ia32_sqrtph: case X86::BI__builtin_ia32_sqrtph512: - case X86::BI__builtin_ia32_vsqrtbf16256: - case X86::BI__builtin_ia32_vsqrtbf16: - case X86::BI__builtin_ia32_vsqrtbf16512: case X86::BI__builtin_ia32_sqrtps512: case X86::BI__builtin_ia32_sqrtpd512: case X86::BI__builtin_ia32_pmuludq128: @@ -943,7 +1001,6 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_vcvtph2ps256_mask: case X86::BI__builtin_ia32_vcvtph2ps512_mask: case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: - case X86::BI__builtin_ia32_cvtsbf162ss_32: case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: case X86::BI__cpuid: diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h 
b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 13dc9f305945a..57b1a1f20aa17 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -192,6 +192,9 @@ class CIRGenCXXABI { QualType elementType, const CXXDestructorDecl *dtor) = 0; + virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, + FunctionArgList &args) const = 0; + /// Checks if ABI requires extra virtual offset for vtable field. virtual bool isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 0f10347944fae..c98d9bb0724f6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -111,8 +111,24 @@ static void emitMemberInitializer(CIRGenFunction &cgf, // NOTE(cir): CodeGen allows record types to be memcpy'd if applicable, // whereas ClangIR wants to represent all object construction explicitly. if (!baseElementTy->isRecordType()) { - cgf.cgm.errorNYI(memberInit->getSourceRange(), - "emitMemberInitializer: array of non-record type"); + unsigned srcArgIndex = + cgf.cgm.getCXXABI().getSrcArgforCopyCtor(constructor, args); + cir::LoadOp srcPtr = cgf.getBuilder().createLoad( + cgf.getLoc(memberInit->getSourceLocation()), + cgf.getAddrOfLocalVar(args[srcArgIndex])); + LValue thisRhslv = cgf.makeNaturalAlignAddrLValue(srcPtr, recordTy); + LValue src = cgf.emitLValueForFieldInitialization(thisRhslv, field, + field->getName()); + + // Copy the aggregate. + cgf.emitAggregateCopy(lhs, src, fieldType, + cgf.getOverlapForFieldInit(field), + lhs.isVolatileQualified()); + // Ensure that we destroy the objects if an exception is thrown later in + // the constructor. 
+ QualType::DestructionKind dtorKind = fieldType.isDestructedType(); + assert(!cgf.needsEHCleanup(dtorKind) && + "Arrays of non-record types shouldn't need EH cleanup"); return; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 03ae967af21de..f7df811a67c26 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -97,6 +97,15 @@ struct ParamReferenceReplacerRAII { } }; } // namespace + +RValue CIRGenFunction::emitCoroutineFrame() { + if (curCoro.data && curCoro.data->coroBegin) { + return RValue::get(curCoro.data->coroBegin); + } + cgm.errorNYI("NYI"); + return RValue(); +} + static void createCoroData(CIRGenFunction &cgf, CIRGenFunction::CGCoroInfo &curCoro, cir::CallOp coroId) { @@ -302,11 +311,24 @@ emitSuspendExpression(CIRGenFunction &cgf, CGCoroData &coro, builder, cgf.getLoc(s.getSourceRange()), kind, /*readyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.createCondition( - cgf.createDummyValue(loc, cgf.getContext().BoolTy)); + Expr *condExpr = s.getReadyExpr()->IgnoreParens(); + builder.createCondition(cgf.evaluateExprAsBool(condExpr)); }, /*suspendBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + // Note that differently from LLVM codegen we do not emit coro.save + // and coro.suspend here, that should be done as part of lowering this + // to LLVM dialect (or some other MLIR dialect) + + // A invalid suspendRet indicates "void returning await_suspend" + mlir::Value suspendRet = cgf.emitScalarExpr(s.getSuspendExpr()); + + // Veto suspension if requested by bool returning await_suspend. + if (suspendRet) { + cgf.cgm.errorNYI("Veto await_suspend"); + } + + // Signals the parent that execution flows to next region. 
cir::YieldOp::create(builder, loc); }, /*resumeBuilder=*/ diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7c94743d5ffc6..a8c2061ddbd6c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2325,14 +2325,45 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( const QualType typeToSize = e->getTypeOfArgument(); const mlir::Location loc = cgf.getLoc(e->getSourceRange()); if (auto kind = e->getKind(); - kind == UETT_SizeOf || kind == UETT_DataSizeOf) { - if (cgf.getContext().getAsVariableArrayType(typeToSize)) { - cgf.getCIRGenModule().errorNYI(e->getSourceRange(), - "sizeof operator for VariableArrayType", - e->getStmtClassName()); - return builder.getConstant( - loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, - llvm::APSInt(llvm::APInt(64, 1), true))); + kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) { + if (const VariableArrayType *vat = + cgf.getContext().getAsVariableArrayType(typeToSize)) { + // For _Countof, we only want to evaluate if the extent is actually + // variable as opposed to a multi-dimensional array whose extent is + // constant but whose element type is variable. + bool evaluateExtent = true; + if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) { + evaluateExtent = + !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext()); + } + + if (evaluateExtent) { + if (e->isArgumentType()) { + // sizeof(type) - make sure to emit the VLA size. + cgf.emitVariablyModifiedType(typeToSize); + } else { + // C99 6.5.3.4p2: If the argument is an expression of type + // VLA, it is evaluated. + cgf.getCIRGenModule().errorNYI( + e->getSourceRange(), + "sizeof operator for VariableArrayType & evaluateExtent " + "ignoredExpr", + e->getStmtClassName()); + return {}; + } + + // For _Countof, we just want to return the size of a single dimension. 
+ if (kind == UETT_CountOf) + return cgf.getVLAElements1D(vat).numElts; + + cgf.getCIRGenModule().errorNYI( + e->getSourceRange(), + "sizeof operator for VariableArrayType & evaluateExtent", + e->getStmtClassName()); + return builder.getConstant( + loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, + -llvm::APSInt(llvm::APInt(64, 1), true))); + } } } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) { cgf.getCIRGenModule().errorNYI( diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 33bdfa315a9ea..22128ed3521f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1138,6 +1138,14 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) { return {numElements, elementType}; } +CIRGenFunction::VlaSizePair +CIRGenFunction::getVLAElements1D(const VariableArrayType *vla) { + mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()]; + assert(vlaSize && "no size for VLA!"); + assert(vlaSize.getType() == sizeTy); + return {vlaSize, vla->getElementType()}; +} + // TODO(cir): Most of this function can be shared between CIRGen // and traditional LLVM codegen void CIRGenFunction::emitVariablyModifiedType(QualType type) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 29a7affe50576..b6926bb88ac85 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -498,6 +498,10 @@ class CIRGenFunction : public CIRGenTypeCache { VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {} }; + /// Return the number of elements for a single dimension + /// for the given array type. + VlaSizePair getVLAElements1D(const VariableArrayType *vla); + /// Returns an MLIR::Value+QualType pair that corresponds to the size, /// in non-variably-sized elements, of a variable length array type, /// plus that largest non-variably-sized element type. 
Assumes that @@ -1418,6 +1422,7 @@ class CIRGenFunction : public CIRGenTypeCache { cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc); cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr); + RValue emitCoroutineFrame(); void emitDestroy(Address addr, QualType type, Destroyer *destroyer); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index c98edad1303ed..7e145f2c57ce6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -123,6 +123,12 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { return true; } + size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, + FunctionArgList &args) const override { + assert(!args.empty() && "expected the arglist to not be empty!"); + return args.size() - 1; + } + void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) override; mlir::Value diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6bf543cf794b7..d505ca141d383 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -330,6 +330,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, "zero expects struct, array, vector, or complex type"); } + if (mlir::isa(attrType)) { + if (!mlir::isa(opType)) + return success(); + return op->emitOpError("undef expects non-void type"); + } + if (mlir::isa(attrType)) { if (!mlir::isa(opType)) return op->emitOpError("result type (") @@ -715,8 +721,28 @@ unsigned cir::CallOp::getNumArgOperands() { return this->getOperation()->getNumOperands(); } +static mlir::ParseResult +parseTryCallDestinations(mlir::OpAsmParser &parser, + mlir::OperationState &result) { + mlir::Block *normalDestSuccessor; + if (parser.parseSuccessor(normalDestSuccessor)) + return mlir::failure(); + + if (parser.parseComma()) + return mlir::failure(); + + mlir::Block *unwindDestSuccessor; + 
if (parser.parseSuccessor(unwindDestSuccessor)) + return mlir::failure(); + + result.addSuccessors(normalDestSuccessor); + result.addSuccessors(unwindDestSuccessor); + return mlir::success(); +} + static mlir::ParseResult parseCallCommon(mlir::OpAsmParser &parser, - mlir::OperationState &result) { + mlir::OperationState &result, + bool hasDestinationBlocks = false) { llvm::SmallVector ops; llvm::SMLoc opsLoc; mlir::FlatSymbolRefAttr calleeAttr; @@ -743,6 +769,11 @@ static mlir::ParseResult parseCallCommon(mlir::OpAsmParser &parser, if (parser.parseRParen()) return mlir::failure(); + if (hasDestinationBlocks && + parseTryCallDestinations(parser, result).failed()) { + return ::mlir::failure(); + } + if (parser.parseOptionalKeyword("nothrow").succeeded()) result.addAttribute(CIRDialect::getNoThrowAttrName(), mlir::UnitAttr::get(parser.getContext())); @@ -782,7 +813,9 @@ static void printCallCommon(mlir::Operation *op, mlir::FlatSymbolRefAttr calleeSym, mlir::Value indirectCallee, mlir::OpAsmPrinter &printer, bool isNothrow, - cir::SideEffect sideEffect) { + cir::SideEffect sideEffect, + mlir::Block *normalDest = nullptr, + mlir::Block *unwindDest = nullptr) { printer << ' '; auto callLikeOp = mlir::cast(op); @@ -796,8 +829,18 @@ static void printCallCommon(mlir::Operation *op, assert(indirectCallee); printer << indirectCallee; } + printer << "(" << ops << ")"; + if (normalDest) { + assert(unwindDest && "expected two successors"); + auto tryCall = cast(op); + printer << ' ' << tryCall.getNormalDest(); + printer << ","; + printer << ' '; + printer << tryCall.getUnwindDest(); + } + if (isNothrow) printer << " nothrow"; @@ -807,11 +850,11 @@ static void printCallCommon(mlir::Operation *op, printer << ")"; } - printer.printOptionalAttrDict(op->getAttrs(), - {CIRDialect::getCalleeAttrName(), - CIRDialect::getNoThrowAttrName(), - CIRDialect::getSideEffectAttrName()}); - + llvm::SmallVector<::llvm::StringRef> elidedAttrs = { + CIRDialect::getCalleeAttrName(), 
CIRDialect::getNoThrowAttrName(), + CIRDialect::getSideEffectAttrName(), + CIRDialect::getOperandSegmentSizesAttrName()}; + printer.printOptionalAttrDict(op->getAttrs(), elidedAttrs); printer << " : "; printer.printFunctionalType(op->getOperands().getTypes(), op->getResultTypes()); @@ -892,6 +935,59 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } +//===----------------------------------------------------------------------===// +// TryCallOp +//===----------------------------------------------------------------------===// + +mlir::OperandRange cir::TryCallOp::getArgOperands() { + if (isIndirect()) + return getArgs().drop_front(1); + return getArgs(); +} + +mlir::MutableOperandRange cir::TryCallOp::getArgOperandsMutable() { + mlir::MutableOperandRange args = getArgsMutable(); + if (isIndirect()) + return args.slice(1, args.size() - 1); + return args; +} + +mlir::Value cir::TryCallOp::getIndirectCall() { + assert(isIndirect()); + return getOperand(0); +} + +/// Return the operand at index 'i'. +Value cir::TryCallOp::getArgOperand(unsigned i) { + if (isIndirect()) + ++i; + return getOperand(i); +} + +/// Return the number of operands. +unsigned cir::TryCallOp::getNumArgOperands() { + if (isIndirect()) + return this->getOperation()->getNumOperands() - 1; + return this->getOperation()->getNumOperands(); +} + +LogicalResult +cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + return verifyCallCommInSymbolUses(*this, symbolTable); +} + +mlir::ParseResult cir::TryCallOp::parse(mlir::OpAsmParser &parser, + mlir::OperationState &result) { + return parseCallCommon(parser, result, /*hasDestinationBlocks=*/true); +} + +void cir::TryCallOp::print(::mlir::OpAsmPrinter &p) { + mlir::Value indirectCallee = isIndirect() ? 
getIndirectCall() : nullptr; + cir::SideEffect sideEffect = getSideEffect(); + printCallCommon(*this, getCalleeAttr(), indirectCallee, p, getNothrow(), + sideEffect, getNormalDest(), getUnwindDest()); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6136d48204e0c..0c34d87734c3e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -202,6 +202,14 @@ mlir::LogicalResult CIRToLLVMExpOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMExp2OpLowering::matchAndRewrite( + cir::Exp2Op op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Type resTy = typeConverter->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, resTy, adaptor.getSrc()); + return mlir::success(); +} + static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, bool isUnsigned, uint64_t cirSrcWidth, @@ -232,7 +240,7 @@ class CIRAttrToValue { .Case( + cir::UndefAttr, cir::VTableAttr, cir::ZeroAttr>( [&](auto attrT) { return visitCirAttr(attrT); }) .Default([&](auto attrT) { return mlir::Value(); }); } @@ -246,6 +254,7 @@ class CIRAttrToValue { mlir::Value visitCirAttr(cir::ConstVectorAttr attr); mlir::Value visitCirAttr(cir::GlobalViewAttr attr); mlir::Value visitCirAttr(cir::TypeInfoAttr attr); + mlir::Value visitCirAttr(cir::UndefAttr attr); mlir::Value visitCirAttr(cir::VTableAttr attr); mlir::Value visitCirAttr(cir::ZeroAttr attr); @@ -583,6 +592,13 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::TypeInfoAttr typeInfoAttr) { return result; } +/// UndefAttr visitor. 
+mlir::Value CIRAttrToValue::visitCirAttr(cir::UndefAttr undefAttr) { + mlir::Location loc = parentOp->getLoc(); + return mlir::LLVM::UndefOp::create( + rewriter, loc, converter->convertType(undefAttr.getType())); +} + // VTableAttr visitor. mlir::Value CIRAttrToValue::visitCirAttr(cir::VTableAttr vtableArr) { mlir::Type llvmTy = converter->convertType(vtableArr.getType()); @@ -2038,9 +2054,11 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal( cir::GlobalOp op, mlir::Attribute init, mlir::ConversionPatternRewriter &rewriter) const { // TODO: Generalize this handling when more types are needed here. - assert((isa(init))); + assert( + (isa( + init))); // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals @@ -2098,8 +2116,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( } else if (mlir::isa( - init.value())) { + cir::TypeInfoAttr, cir::UndefAttr, cir::VTableAttr, + cir::ZeroAttr>(init.value())) { // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. 
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 5590d217e96ff..82ca831f35da2 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -1134,6 +1134,8 @@ void EmitAssemblyHelper::RunOptimizationPipeline( CodeGenOpts.SanitizeMinimalRuntime), /*MayReturn=*/ CodeGenOpts.SanitizeRecover.has(SanitizerKind::LocalBounds), + /*HandlerPreserveAllRegs=*/ + static_cast(CodeGenOpts.SanitizeHandlerPreserveAllRegs), }; } FPM.addPass(BoundsCheckingPass(Options)); diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index fbc4ac3b05836..0a76d04b83a54 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -767,6 +767,13 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Op = llvm::AtomicRMWInst::Nand; break; + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + Op = llvm::AtomicRMWInst::UIncWrap; + break; + case AtomicExpr::AO__scoped_atomic_udec_wrap: + Op = llvm::AtomicRMWInst::UDecWrap; + break; + case AtomicExpr::AO__atomic_test_and_set: { llvm::AtomicRMWInst *RMWI = CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr, @@ -1071,6 +1078,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__scoped_atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_store_n: case AtomicExpr::AO__scoped_atomic_exchange_n: + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + case AtomicExpr::AO__scoped_atomic_udec_wrap: Val1 = EmitValToTemp(*this, E->getVal1()); break; } @@ -1269,6 +1278,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__opencl_atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + case AtomicExpr::AO__scoped_atomic_udec_wrap: case AtomicExpr::AO__atomic_test_and_set: case AtomicExpr::AO__atomic_clear: llvm_unreachable("Integral atomic operations always become atomicrmw!"); 
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 4eb99cc342275..c50f372c1f331 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -6519,7 +6519,8 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const { // when there's a possibility of debugging backtraces. if (CGM.getCodeGenOpts().OptimizationLevel == 0 || DebugKind == llvm::codegenoptions::NoDebugInfo || - DebugKind == llvm::codegenoptions::LocTrackingOnly) + DebugKind == llvm::codegenoptions::LocTrackingOnly || + !CGM.getCodeGenOpts().DebugCallSiteInfo) return llvm::DINode::FlagZero; // Call site-related attributes are available in DWARF v5. Some debuggers, diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index b33772919b8c8..c8f669b69d991 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -2801,18 +2801,49 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst) { llvm::Value *SrcVal = Src.getScalarVal(); Address DstAddr = Dst.getExtVectorAddress(); + const llvm::Constant *Elts = Dst.getExtVectorElts(); if (DstAddr.getElementType()->getScalarSizeInBits() > SrcVal->getType()->getScalarSizeInBits()) SrcVal = Builder.CreateZExt( SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType())); - // HLSL allows storing to scalar values through ExtVector component LValues. - // To support this we need to handle the case where the destination address is - // a scalar. - if (!DstAddr.getElementType()->isVectorTy()) { - assert(!Dst.getType()->isVectorType() && - "this should only occur for non-vector l-values"); - Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified()); + if (getLangOpts().HLSL) { + llvm::Type *DestAddrTy = DstAddr.getElementType(); + // HLSL allows storing to scalar values through ExtVector component LValues. + // To support this we need to handle the case where the destination address + // is a scalar. 
+ if (!DestAddrTy->isVectorTy()) { + assert(!Dst.getType()->isVectorType() && + "this should only occur for non-vector l-values"); + Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified()); + return; + } + + // HLSL allows direct access to vector elements, so storing to individual + // elements of a vector through ExtVector is handled as separate store + // instructions. + // If we are updating multiple elements, Dst and Src are vectors; for + // a single element update they are scalars. + const VectorType *VTy = Dst.getType()->getAs(); + unsigned NumSrcElts = VTy ? VTy->getNumElements() : 1; + CharUnits ElemAlign = CharUnits::fromQuantity( + CGM.getDataLayout().getPrefTypeAlign(DestAddrTy->getScalarType())); + llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); + + for (unsigned I = 0; I != NumSrcElts; ++I) { + llvm::Value *Val = VTy ? Builder.CreateExtractElement( + SrcVal, llvm::ConstantInt::get(Int32Ty, I)) + : SrcVal; + unsigned FieldNo = getAccessedFieldNo(I, Elts); + Address DstElemAddr = Address::invalid(); + if (FieldNo == 0) + DstElemAddr = DstAddr.withAlignment(ElemAlign); + else + DstElemAddr = Builder.CreateGEP( + DstAddr, {Zero, llvm::ConstantInt::get(Int32Ty, FieldNo)}, + DestAddrTy, ElemAlign); + Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified()); + } return; } @@ -2820,7 +2851,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, // value now. 
llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified()); llvm::Type *VecTy = Vec->getType(); - const llvm::Constant *Elts = Dst.getExtVectorElts(); if (const VectorType *VTy = Dst.getType()->getAs()) { unsigned NumSrcElts = VTy->getNumElements(); @@ -3789,6 +3819,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, bool NeedsAbortSuffix = IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; + bool HandlerPreserveAllRegs = + CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs; const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; const StringRef CheckName = CheckInfo.Name; std::string FnName = "__ubsan_handle_" + CheckName.str(); @@ -3798,6 +3830,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, FnName += "_minimal"; if (NeedsAbortSuffix) FnName += "_abort"; + if (HandlerPreserveAllRegs && !NeedsAbortSuffix) + FnName += "_preserve"; bool MayReturn = !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; @@ -3818,6 +3852,10 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr()); if (NoMerge) HandlerCall->addFnAttr(llvm::Attribute::NoMerge); + if (HandlerPreserveAllRegs && !NeedsAbortSuffix) { + // N.B. there is also a clang::CallingConv which is not what we want here. 
+ HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll); + } if (!MayReturn) { HandlerCall->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index 2f69a53787f0c..572d59edb99b2 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -1727,7 +1727,7 @@ void CGOpenMPRuntimeGPU::emitReduction( CGF.Builder.GetInsertPoint()); llvm::OpenMPIRBuilder::LocationDescription OmpLoc( CodeGenIP, CGF.SourceLocToDebugLoc(Loc)); - llvm::SmallVector ReductionInfos; + llvm::SmallVector ReductionInfos; CodeGenFunction::OMPPrivateScope Scope(CGF); unsigned Idx = 0; @@ -1780,14 +1780,15 @@ void CGOpenMPRuntimeGPU::emitReduction( }; ReductionInfos.emplace_back(llvm::OpenMPIRBuilder::ReductionInfo( ElementType, Variable, PrivateVariable, EvalKind, - /*ReductionGen=*/nullptr, ReductionGen, AtomicReductionGen)); + /*ReductionGen=*/nullptr, ReductionGen, AtomicReductionGen, + /*DataPtrPtrGen=*/nullptr)); Idx++; } llvm::OpenMPIRBuilder::InsertPointTy AfterIP = cantFail(OMPBuilder.createReductionsGPU( - OmpLoc, AllocaIP, CodeGenIP, ReductionInfos, false, TeamsReduction, - llvm::OpenMPIRBuilder::ReductionGenCBKind::Clang, + OmpLoc, AllocaIP, CodeGenIP, ReductionInfos, /*IsByRef=*/{}, false, + TeamsReduction, llvm::OpenMPIRBuilder::ReductionGenCBKind::Clang, CGF.getTarget().getGridValue(), C.getLangOpts().OpenMPCUDAReductionBufNum, RTLoc)); CGF.Builder.restoreIP(AfterIP); diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp index dbb7bc99ac638..a49a0c91681fe 100644 --- a/clang/lib/CodeGen/CGPointerAuth.cpp +++ b/clang/lib/CodeGen/CGPointerAuth.cpp @@ -440,9 +440,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key, IntegerDiscriminator = llvm::ConstantInt::get(Int64Ty, 0); } - return llvm::ConstantPtrAuth::get(Pointer, - llvm::ConstantInt::get(Int32Ty, Key), - 
IntegerDiscriminator, AddressDiscriminator); + return llvm::ConstantPtrAuth::get( + Pointer, llvm::ConstantInt::get(Int32Ty, Key), IntegerDiscriminator, + AddressDiscriminator, + /*DeactivationSymbol=*/llvm::Constant::getNullValue(DefaultPtrTy)); } /// Does a given PointerAuthScheme require us to sign a value diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 645b78a599f89..4789c6b26797f 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -4107,6 +4107,38 @@ template static bool hasImplicitAttr(const ValueDecl *D) { return D->isImplicit(); } +static bool shouldSkipAliasEmission(const CodeGenModule &CGM, + const ValueDecl *Global) { + const LangOptions &LangOpts = CGM.getLangOpts(); + if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.CUDA) + return false; + + const auto *AA = Global->getAttr(); + GlobalDecl AliaseeGD; + + // Check if the aliasee exists, if the aliasee is not found, skip the alias + // emission. This is executed for both the host and device. 
+ if (!CGM.lookupRepresentativeDecl(AA->getAliasee(), AliaseeGD)) + return true; + + const auto *AliaseeDecl = dyn_cast(AliaseeGD.getDecl()); + if (LangOpts.OpenMPIsTargetDevice) + return !AliaseeDecl || + !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(AliaseeDecl); + + // CUDA / HIP + const bool HasDeviceAttr = Global->hasAttr(); + const bool AliaseeHasDeviceAttr = + AliaseeDecl && AliaseeDecl->hasAttr(); + + if (LangOpts.CUDAIsDevice) + return !HasDeviceAttr || !AliaseeHasDeviceAttr; + + // CUDA / HIP Host + // we know that the aliasee exists from above, so we know to emit + return false; +} + bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const { assert(LangOpts.CUDA && "Should not be called by non-CUDA languages"); // We need to emit host-side 'shadows' for all global @@ -4129,8 +4161,11 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) { // If this is an alias definition (which otherwise looks like a declaration) // emit it now. - if (Global->hasAttr()) + if (Global->hasAttr()) { + if (shouldSkipAliasEmission(*this, Global)) + return; return EmitAliasDefinition(GD); + } // IFunc like an alias whose value is resolved at runtime by calling resolver. 
if (Global->hasAttr()) diff --git a/clang/lib/CodeGen/SanitizerHandler.h b/clang/lib/CodeGen/SanitizerHandler.h index a66e7ab354eb2..871e17c22d3fa 100644 --- a/clang/lib/CodeGen/SanitizerHandler.h +++ b/clang/lib/CodeGen/SanitizerHandler.h @@ -64,7 +64,7 @@ SANITIZER_CHECK(SubOverflow, sub_overflow, 0, \ "Integer subtraction overflowed") \ SANITIZER_CHECK(TypeMismatch, type_mismatch, 1, \ - "Type mismatch in operation") \ + "Alignment, null, or object-size error") \ SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0, \ "Alignment assumption violated") \ SANITIZER_CHECK( \ diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp b/clang/lib/CodeGen/TargetBuiltins/X86.cpp index 00c8a1cf16e31..be2b7d442645e 100644 --- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp @@ -2171,21 +2171,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, return Builder.CreateBitCast(Res, Ops[0]->getType()); } - case X86::BI__builtin_ia32_sqrtss: - case X86::BI__builtin_ia32_sqrtsd: { - Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); - Function *F; - if (Builder.getIsFPConstrained()) { - CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); - F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, - A->getType()); - A = Builder.CreateConstrainedFPCall(F, {A}); - } else { - F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); - A = Builder.CreateCall(F, {A}); - } - return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); - } case X86::BI__builtin_ia32_sqrtsh_round_mask: case X86::BI__builtin_ia32_sqrtsd_round_mask: case X86::BI__builtin_ia32_sqrtss_round_mask: { @@ -2225,40 +2210,29 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, A = EmitX86ScalarSelect(*this, Ops[3], A, Src); return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); } - case X86::BI__builtin_ia32_sqrtpd256: - case X86::BI__builtin_ia32_sqrtpd: - case X86::BI__builtin_ia32_sqrtps256: - case 
X86::BI__builtin_ia32_sqrtps: - case X86::BI__builtin_ia32_sqrtph256: - case X86::BI__builtin_ia32_sqrtph: case X86::BI__builtin_ia32_sqrtph512: - case X86::BI__builtin_ia32_vsqrtbf16256: - case X86::BI__builtin_ia32_vsqrtbf16: - case X86::BI__builtin_ia32_vsqrtbf16512: case X86::BI__builtin_ia32_sqrtps512: case X86::BI__builtin_ia32_sqrtpd512: { - if (Ops.size() == 2) { - unsigned CC = cast(Ops[1])->getZExtValue(); - // Support only if the rounding mode is 4 (AKA CUR_DIRECTION), - // otherwise keep the intrinsic. - if (CC != 4) { - Intrinsic::ID IID; - - switch (BuiltinID) { - default: - llvm_unreachable("Unsupported intrinsic!"); - case X86::BI__builtin_ia32_sqrtph512: - IID = Intrinsic::x86_avx512fp16_sqrt_ph_512; - break; - case X86::BI__builtin_ia32_sqrtps512: - IID = Intrinsic::x86_avx512_sqrt_ps_512; - break; - case X86::BI__builtin_ia32_sqrtpd512: - IID = Intrinsic::x86_avx512_sqrt_pd_512; - break; - } - return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); + unsigned CC = cast(Ops[1])->getZExtValue(); + // Support only if the rounding mode is 4 (AKA CUR_DIRECTION), + // otherwise keep the intrinsic. 
+ if (CC != 4) { + Intrinsic::ID IID; + + switch (BuiltinID) { + default: + llvm_unreachable("Unsupported intrinsic!"); + case X86::BI__builtin_ia32_sqrtph512: + IID = Intrinsic::x86_avx512fp16_sqrt_ph_512; + break; + case X86::BI__builtin_ia32_sqrtps512: + IID = Intrinsic::x86_avx512_sqrt_ps_512; + break; + case X86::BI__builtin_ia32_sqrtpd512: + IID = Intrinsic::x86_avx512_sqrt_pd_512; + break; } + return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); } if (Builder.getIsFPConstrained()) { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); @@ -2796,8 +2770,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); } - case X86::BI__builtin_ia32_cvtsbf162ss_32: - return Builder.CreateFPExt(Ops[0], Builder.getFloatTy()); case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp index 38dbebdec2429..3fa4e84823d51 100644 --- a/clang/lib/CodeGen/Targets/Sparc.cpp +++ b/clang/lib/CodeGen/Targets/Sparc.cpp @@ -26,23 +26,39 @@ class SparcV8ABIInfo : public DefaultABIInfo { private: ABIArgInfo classifyReturnType(QualType RetTy) const; + ABIArgInfo classifyArgumentType(QualType Ty) const; void computeInfo(CGFunctionInfo &FI) const override; }; } // end anonymous namespace +ABIArgInfo SparcV8ABIInfo::classifyReturnType(QualType Ty) const { + const auto *CT = Ty->getAs(); + const auto *BT = Ty->getAs(); + if (CT) + BT = CT->getElementType()->getAs(); + bool IsLongDouble = BT && BT->getKind() == BuiltinType::LongDouble; -ABIArgInfo -SparcV8ABIInfo::classifyReturnType(QualType Ty) const { - if (Ty->isAnyComplexType()) { - return ABIArgInfo::getDirect(); - } - else { - return DefaultABIInfo::classifyReturnType(Ty); - } + // long double _Complex is special in that it should be marked as inreg. 
+ if (CT) + return IsLongDouble ? ABIArgInfo::getDirectInReg() + : ABIArgInfo::getDirect(); + + if (IsLongDouble) + return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(), + /*ByVal=*/false); + + return DefaultABIInfo::classifyReturnType(Ty); } -void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { +ABIArgInfo SparcV8ABIInfo::classifyArgumentType(QualType Ty) const { + if (const auto *BT = Ty->getAs(); + BT && BT->getKind() == BuiltinType::LongDouble) + return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace()); + return DefaultABIInfo::classifyArgumentType(Ty); +} + +void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &Arg : FI.arguments()) Arg.info = classifyArgumentType(Arg.type); diff --git a/clang/lib/CrossTU/CMakeLists.txt b/clang/lib/CrossTU/CMakeLists.txt index 3349fc283925d..eef7a892701fb 100644 --- a/clang/lib/CrossTU/CMakeLists.txt +++ b/clang/lib/CrossTU/CMakeLists.txt @@ -9,6 +9,7 @@ add_clang_library(clangCrossTU LINK_LIBS clangAST clangBasic + clangDriver clangFrontend clangIndex ) diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp index 0287845a741ed..a3fc2cf6bfb3c 100644 --- a/clang/lib/CrossTU/CrossTranslationUnit.cpp +++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp @@ -16,6 +16,7 @@ #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/TargetInfo.h" #include "clang/CrossTU/CrossTUDiagnostic.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/TextDiagnosticPrinter.h" @@ -619,7 +620,7 @@ CrossTranslationUnitContext::ASTLoader::loadFromSource( auto Diags = llvm::makeIntrusiveRefCnt(DiagID, *DiagOpts, DiagClient); - return ASTUnit::LoadFromCommandLine( + return CreateASTUnitFromCommandLine( CommandLineArgs.begin(), (CommandLineArgs.end()), CI.getPCHContainerOperations(), 
DiagOpts, Diags, CI.getHeaderSearchOpts().ResourceDir); diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 8052659e9836b..d987111827597 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -17,6 +17,8 @@ endif() add_clang_library(clangDriver Action.cpp Compilation.cpp + CreateASTUnitFromArgs.cpp + CreateInvocationFromArgs.cpp Distro.cpp Driver.cpp Job.cpp @@ -96,6 +98,8 @@ add_clang_library(clangDriver LINK_LIBS clangBasic + clangFrontend + clangSerialization clangLex clangOptions ${system_libs} diff --git a/clang/lib/Driver/CreateASTUnitFromArgs.cpp b/clang/lib/Driver/CreateASTUnitFromArgs.cpp new file mode 100644 index 0000000000000..ea31a8ed07c5f --- /dev/null +++ b/clang/lib/Driver/CreateASTUnitFromArgs.cpp @@ -0,0 +1,166 @@ +//===--- CreateASTUnitFromArgs.h - Create an ASTUnit from Args ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating an ASTUnit from a vector of command line arguments. +// +//===----------------------------------------------------------------------===// + +#include "clang/Driver/CreateASTUnitFromArgs.h" +#include "clang/Driver/CreateInvocationFromArgs.h" +#include "clang/Frontend/CompilerInvocation.h" +#include "clang/Lex/PreprocessorOptions.h" +#include "clang/Serialization/ModuleCache.h" +#include "llvm/Support/CrashRecoveryContext.h" + +using namespace clang; + +/// Create an ASTUnit from a vector of command line arguments, which must +/// specify exactly one source file. +/// +/// \param ArgBegin - The beginning of the argument vector. +/// +/// \param ArgEnd - The end of the argument vector. 
+/// +/// \param PCHContainerOps - The PCHContainerOperations to use for loading and +/// creating modules. +/// +/// \param Diags - The diagnostics engine to use for reporting errors; its +/// lifetime is expected to extend past that of the returned ASTUnit. +/// +/// \param ResourceFilesPath - The path to the compiler resource files. +/// +/// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, +/// PCH are stored in temporary files. +/// +/// \param PreambleStoragePath - The path to a directory, in which to create +/// temporary PCH files. If empty, the default system temporary directory is +/// used. This parameter is ignored if \p StorePreamblesInMemory is true. +/// +/// \param ModuleFormat - If provided, uses the specific module format. +/// +/// \param ErrAST - If non-null and parsing failed without any AST to return +/// (e.g. because the PCH could not be loaded), this accepts the ASTUnit +/// mainly to allow the caller to see the diagnostics. +/// +/// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. +/// Note that preamble is saved to a temporary directory on a RealFileSystem, +/// so in order for it to be loaded correctly, VFS should have access to +/// it(i.e., be an overlay over RealFileSystem). RealFileSystem will be used +/// if \p VFS is nullptr. +/// +// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we +// shouldn't need to specify them at construction time. 
+std::unique_ptr clang::CreateASTUnitFromCommandLine( + const char **ArgBegin, const char **ArgEnd, + std::shared_ptr PCHContainerOps, + std::shared_ptr DiagOpts, + IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, + bool StorePreamblesInMemory, StringRef PreambleStoragePath, + bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, + ArrayRef RemappedFiles, + bool RemappedFilesKeepOriginalName, unsigned PrecompilePreambleAfterNParses, + TranslationUnitKind TUKind, bool CacheCodeCompletionResults, + bool IncludeBriefCommentsInCodeCompletion, bool AllowPCHWithCompilerErrors, + SkipFunctionBodiesScope SkipFunctionBodies, bool SingleFileParse, + bool UserFilesAreVolatile, bool ForSerialization, + bool RetainExcludedConditionalBlocks, std::optional ModuleFormat, + std::unique_ptr *ErrAST, + IntrusiveRefCntPtr VFS) { + assert(Diags.get() && "no DiagnosticsEngine was provided"); + + // If no VFS was provided, create one that tracks the physical file system. + // If '-working-directory' was passed as an argument, 'createInvocation' will + // set this as the current working directory of the VFS. + if (!VFS) + VFS = llvm::vfs::createPhysicalFileSystem(); + + SmallVector StoredDiagnostics; + + std::shared_ptr CI; + + { + CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags, + &StoredDiagnostics, nullptr); + + CreateInvocationOptions CIOpts; + CIOpts.VFS = VFS; + CIOpts.Diags = Diags; + CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed? 
+ CI = createInvocation(llvm::ArrayRef(ArgBegin, ArgEnd), std::move(CIOpts)); + if (!CI) + return nullptr; + } + + // Override any files that need remapping + for (const auto &RemappedFile : RemappedFiles) { + CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first, + RemappedFile.second); + } + PreprocessorOptions &PPOpts = CI->getPreprocessorOpts(); + PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName; + PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors; + PPOpts.SingleFileParseMode = SingleFileParse; + PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks; + + // Override the resources path. + CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath); + + CI->getFrontendOpts().SkipFunctionBodies = + SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile; + + if (ModuleFormat) + CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat); + + // Create the AST unit. + std::unique_ptr AST; + AST.reset(new ASTUnit(false)); + AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size(); + AST->StoredDiagnostics.swap(StoredDiagnostics); + ASTUnit::ConfigureDiags(Diags, *AST, CaptureDiagnostics); + AST->DiagOpts = DiagOpts; + AST->Diagnostics = Diags; + AST->FileSystemOpts = CI->getFileSystemOpts(); + AST->CodeGenOpts = std::make_unique(CI->getCodeGenOpts()); + VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS); + AST->FileMgr = + llvm::makeIntrusiveRefCnt(AST->FileSystemOpts, VFS); + AST->StorePreamblesInMemory = StorePreamblesInMemory; + AST->PreambleStoragePath = PreambleStoragePath; + AST->ModCache = createCrossProcessModuleCache(); + AST->OnlyLocalDecls = OnlyLocalDecls; + AST->CaptureDiagnostics = CaptureDiagnostics; + AST->TUKind = TUKind; + AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults; + AST->IncludeBriefCommentsInCodeCompletion = + IncludeBriefCommentsInCodeCompletion; + AST->UserFilesAreVolatile = UserFilesAreVolatile; + AST->Invocation = CI; 
+ AST->SkipFunctionBodies = SkipFunctionBodies; + if (ForSerialization) + AST->WriterData.reset( + new ASTUnit::ASTWriterData(*AST->ModCache, *AST->CodeGenOpts)); + // Zero out now to ease cleanup during crash recovery. + CI = nullptr; + Diags = nullptr; + + // Recover resources if we crash before exiting this method. + llvm::CrashRecoveryContextCleanupRegistrar ASTUnitCleanup(AST.get()); + + if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps), + PrecompilePreambleAfterNParses, VFS)) { + // Some error occurred, if caller wants to examine diagnostics, pass it the + // ASTUnit. + if (ErrAST) { + AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics); + ErrAST->swap(AST); + } + return nullptr; + } + + return AST; +} diff --git a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/clang/lib/Driver/CreateInvocationFromArgs.cpp similarity index 93% rename from clang/lib/Frontend/CreateInvocationFromCommandLine.cpp rename to clang/lib/Driver/CreateInvocationFromArgs.cpp index e54e83151ad1e..516d61f1a1159 100644 --- a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp +++ b/clang/lib/Driver/CreateInvocationFromArgs.cpp @@ -1,4 +1,4 @@ -//===--- CreateInvocationFromCommandLine.cpp - CompilerInvocation from Args ==// +//===--- CreateInvocationFromArgs.h - CompilerInvocation from Args --------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
@@ -10,9 +10,9 @@ // //===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Basic/DiagnosticFrontend.h" #include "clang/Basic/DiagnosticOptions.h" -#include "clang/Driver/Action.h" #include "clang/Driver/Compilation.h" #include "clang/Driver/Driver.h" #include "clang/Driver/Tool.h" @@ -24,12 +24,13 @@ #include "llvm/Option/ArgList.h" #include "llvm/Support/VirtualFileSystem.h" #include "llvm/TargetParser/Host.h" -using namespace clang; + using namespace llvm::opt; +namespace clang { + std::unique_ptr -clang::createInvocation(ArrayRef ArgList, - CreateInvocationOptions Opts) { +createInvocation(ArrayRef ArgList, CreateInvocationOptions Opts) { assert(!ArgList.empty()); std::optional LocalDiagOpts; IntrusiveRefCntPtr Diags; @@ -114,3 +115,5 @@ clang::createInvocation(ArrayRef ArgList, return nullptr; return CI; } + +} // namespace clang diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index de8d4601210ae..8644a271a04b5 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -66,6 +66,7 @@ #include "clang/Driver/ToolChain.h" #include "clang/Driver/Types.h" #include "clang/Lex/DependencyDirectivesScanner.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" @@ -125,40 +126,6 @@ template static bool usesInput(const ArgList &Args, F &&Fn) { }); } -// static -std::string Driver::GetResourcesPath(StringRef BinaryPath) { - // Since the resource directory is embedded in the module hash, it's important - // that all places that need it call this function, so that they get the - // exact same string ("a/../b/" and "b/" get different hashes, for example). - - // Dir is bin/ or lib/, depending on where BinaryPath is. 
- StringRef Dir = llvm::sys::path::parent_path(BinaryPath); - SmallString<128> P(Dir); - - StringRef ConfiguredResourceDir(CLANG_RESOURCE_DIR); - if (!ConfiguredResourceDir.empty()) { - // FIXME: We should fix the behavior of llvm::sys::path::append so we don't - // need to check for absolute paths here. - if (llvm::sys::path::is_absolute(ConfiguredResourceDir)) - P = ConfiguredResourceDir; - else - llvm::sys::path::append(P, ConfiguredResourceDir); - } else { - // On Windows, libclang.dll is in bin/. - // On non-Windows, libclang.so/.dylib is in lib/. - // With a static-library build of libclang, LibClangPath will contain the - // path of the embedding binary, which for LLVM binaries will be in bin/. - // ../lib gets us to lib/ in both cases. - P = llvm::sys::path::parent_path(Dir); - // This search path is also created in the COFF driver of lld, so any - // changes here also needs to happen in lld/COFF/Driver.cpp - llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang", - CLANG_VERSION_MAJOR_STRING); - } - - return std::string(P); -} - CUIDOptions::CUIDOptions(llvm::opt::DerivedArgList &Args, const Driver &D) : UseCUID(Kind::Hash) { if (Arg *A = Args.getLastArg(options::OPT_fuse_cuid_EQ)) { @@ -5024,15 +4991,24 @@ Action *Driver::BuildOffloadingActions(Compilation &C, // Compiling HIP in device-only non-RDC mode requires linking each action // individually. for (Action *&A : DeviceActions) { - // Special handling for the HIP SPIR-V toolchain because it doesn't use - // the SPIR-V backend yet doesn't report the output as an object. bool IsAMDGCNSPIRV = A->getOffloadingToolChain() && A->getOffloadingToolChain()->getTriple().getOS() == llvm::Triple::OSType::AMDHSA && A->getOffloadingToolChain()->getTriple().isSPIRV(); + bool UseSPIRVBackend = Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, + /*Default=*/false); + + // Special handling for the HIP SPIR-V toolchain in device-only. 
+ // The translator path has a linking step, whereas the SPIR-V backend path + // does not to avoid any external dependency such as spirv-link. The + // linking step is skipped for the SPIR-V backend path. + bool IsAMDGCNSPIRVWithBackend = IsAMDGCNSPIRV && UseSPIRVBackend; + if ((A->getType() != types::TY_Object && !IsAMDGCNSPIRV && A->getType() != types::TY_LTO_BC) || - HIPRelocatableObj || !HIPNoRDC || !offloadDeviceOnly()) + HIPRelocatableObj || !HIPNoRDC || !offloadDeviceOnly() || + (IsAMDGCNSPIRVWithBackend && offloadDeviceOnly())) continue; ActionList LinkerInput = {A}; A = C.MakeAction(LinkerInput, types::TY_Image); @@ -5258,12 +5234,28 @@ Action *Driver::ConstructPhaseAction( Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC; return C.MakeAction(Input, Output); } + bool UseSPIRVBackend = Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, + /*Default=*/false); + + auto OffloadingToolChain = Input->getOffloadingToolChain(); + // For AMD SPIRV, if offloadDeviceOnly(), we call the SPIRV backend unless + // LLVM bitcode was requested explicitly or RDC is set. If + // !offloadDeviceOnly, we emit LLVM bitcode, and clang-linker-wrapper will + // compile it to SPIRV. 
+ bool UseSPIRVBackendForHipDeviceOnlyNoRDC = + TargetDeviceOffloadKind == Action::OFK_HIP && OffloadingToolChain && + OffloadingToolChain->getTriple().isSPIRV() && UseSPIRVBackend && + offloadDeviceOnly() && + !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false); + if (Args.hasArg(options::OPT_emit_llvm) || TargetDeviceOffloadKind == Action::OFK_SYCL || (((Input->getOffloadingToolChain() && Input->getOffloadingToolChain()->getTriple().isAMDGPU() && TargetDeviceOffloadKind != Action::OFK_None) || TargetDeviceOffloadKind == Action::OFK_HIP) && + !UseSPIRVBackendForHipDeviceOnlyNoRDC && ((Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false) || (Args.hasFlag(options::OPT_offload_new_driver, @@ -5285,6 +5277,19 @@ Action *Driver::ConstructPhaseAction( : types::TY_LLVM_BC; return C.MakeAction(Input, Output); } + + // The SPIRV backend compilation path for HIP must avoid external + // dependencies. The default compilation path assembles and links its + // output, but the SPIRV assembler and linker are external tools. This code + // ensures the backend emits binary SPIRV directly to bypass those steps and + // avoid failures. Without -save-temps, the compiler may already skip + // assembling and linking. With -save-temps, these steps must be explicitly + // disabled, as done here. We also force skipping these steps regardless of + // -save-temps to avoid relying on optimizations (unless -S is set). 
+ // The current HIP bundling expects the type to be types::TY_Image + if (UseSPIRVBackendForHipDeviceOnlyNoRDC && !Args.hasArg(options::OPT_S)) + return C.MakeAction(Input, types::TY_Image); + return C.MakeAction(Input, types::TY_PP_Asm); } case phases::Assemble: diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp index 9902cbbf99436..be068b2381d06 100644 --- a/clang/lib/Driver/SanitizerArgs.cpp +++ b/clang/lib/Driver/SanitizerArgs.cpp @@ -358,7 +358,7 @@ bool SanitizerArgs::needsFuzzerInterceptors() const { bool SanitizerArgs::needsUbsanRt() const { // All of these include ubsan. if (needsAsanRt() || needsMsanRt() || needsNsanRt() || needsHwasanRt() || - needsTsanRt() || needsDfsanRt() || needsLsanRt() || + needsTsanRt() || needsDfsanRt() || needsLsanRt() || needsTysanRt() || needsCfiCrossDsoDiagRt() || (needsScudoRt() && !requiresMinimalRuntime())) return false; @@ -419,6 +419,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, const Driver &D = TC.getDriver(); SanitizerMask TrappingKinds = parseSanitizeTrapArgs(D, Args, DiagnoseErrors); SanitizerMask InvalidTrappingKinds = TrappingKinds & NotAllowedWithTrap; + const llvm::Triple &Triple = TC.getTriple(); MinimalRuntime = Args.hasFlag(options::OPT_fsanitize_minimal_runtime, @@ -426,7 +427,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, HandlerPreserveAllRegs = Args.hasFlag(options::OPT_fsanitize_handler_preserve_all_regs, options::OPT_fno_sanitize_handler_preserve_all_regs, - HandlerPreserveAllRegs); + HandlerPreserveAllRegs) && + MinimalRuntime && (Triple.isAArch64() || Triple.isX86_64()); // The object size sanitizer should not be enabled at -O0. Arg *OptLevel = Args.getLastArg(options::OPT_O_Group); @@ -494,7 +496,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, // -fsanitize=function and -fsanitize=kcfi instrument indirect function // calls to load a type hash before the function label. 
Therefore, an // execute-only target doesn't support the function and kcfi sanitizers. - const llvm::Triple &Triple = TC.getTriple(); if (isExecuteOnlyTarget(Triple, Args)) { if (SanitizerMask KindsToDiagnose = Add & NotAllowedWithExecuteOnly & ~DiagnosedKinds) { diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp index 80e58d466b885..87ccd40372681 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.cpp +++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp @@ -243,8 +243,15 @@ RocmInstallationDetector::getInstallationPathCandidates() { // Some versions of the rocm llvm package install to /opt/rocm/llvm/bin // Some versions of the aomp package install to /opt/rocm/aomp/bin - if (ParentName == "llvm" || ParentName.starts_with("aomp")) + if (ParentName == "llvm" || ParentName.starts_with("aomp")) { ParentDir = llvm::sys::path::parent_path(ParentDir); + ParentName = llvm::sys::path::filename(ParentDir); + + // Some versions of the rocm llvm package install to + // /opt/rocm/lib/llvm/bin, so also back up if within the lib dir still + if (ParentName == "lib") + ParentDir = llvm::sys::path::parent_path(ParentDir); + } return Candidate(ParentDir.str(), /*StrictChecking=*/true); }; diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp index 1dcce6d053a39..7fda8ea50223d 100644 --- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp +++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp @@ -130,17 +130,10 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple, #undef RESERVE_REG // -mrelax is default, unless -mno-relax is specified. - if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true)) { + if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true)) Features.push_back("+relax"); - // -gsplit-dwarf -mrelax requires DW_AT_high_pc/DW_AT_ranges/... indexing - // into .debug_addr, which is currently not implemented. 
- Arg *A; - if (getDebugFissionKind(D, Args, A) != DwarfFissionKind::None) - D.Diag(clang::diag::err_drv_riscv_unsupported_with_linker_relaxation) - << A->getAsString(Args); - } else { + else Features.push_back("-relax"); - } // If -mstrict-align, -mno-strict-align, -mscalar-strict-align, or // -mno-scalar-strict-align is passed, use it. Otherwise, the diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c5d40c9825fab..0380568412e62 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -32,6 +32,7 @@ #include "clang/Driver/SanitizerArgs.h" #include "clang/Driver/Types.h" #include "clang/Driver/XRayArgs.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/SmallSet.h" @@ -4442,6 +4443,10 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T, DebuggerTuning != llvm::DebuggerKind::DBX))) CmdArgs.push_back("-gno-column-info"); + if (!Args.hasFlag(options::OPT_gcall_site_info, + options::OPT_gno_call_site_info, true)) + CmdArgs.push_back("-gno-call-site-info"); + // FIXME: Move backend command line options to the module. 
if (Args.hasFlag(options::OPT_gmodules, options::OPT_gno_modules, false)) { // If -gline-tables-only or -gline-directives-only is the last option it @@ -5057,6 +5062,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.ClaimAllArgs(options::OPT_femit_dwarf_unwind_EQ); } + bool IsAMDSPIRVForHIPDevice = + IsHIPDevice && getToolChain().getTriple().isSPIRV() && + getToolChain().getTriple().getVendor() == llvm::Triple::AMD; + if (isa(JA)) { assert(JA.getType() == types::TY_Plist && "Invalid output type."); CmdArgs.push_back("-analyze"); @@ -5154,6 +5163,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, rewriteKind = RK_Fragile; } else if (JA.getType() == types::TY_CIR) { CmdArgs.push_back("-emit-cir"); + } else if (JA.getType() == types::TY_Image && IsAMDSPIRVForHIPDevice) { + CmdArgs.push_back("-emit-obj"); } else { assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!"); } @@ -9084,7 +9095,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, OPT_fno_lto, OPT_flto, OPT_flto_partitions_EQ, - OPT_flto_EQ}; + OPT_flto_EQ, + OPT_use_spirv_backend}; + const llvm::DenseSet LinkerOptions{OPT_mllvm, OPT_Zlinker_input}; auto ShouldForwardForToolChain = [&](Arg *A, const ToolChain &TC) { // Don't forward -mllvm to toolchains that don't support LLVM. diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 4c036f0f8dee3..d3539a594df11 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -3398,169 +3398,6 @@ void tools::handleInterchangeLoopsArgs(const ArgList &Args, CmdArgs.push_back("-floop-interchange"); } -// Parse -mprefer-vector-width=. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. 
-StringRef tools::parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args) { - Arg *A = Args.getLastArg(options::OPT_mprefer_vector_width_EQ); - if (!A) - return ""; - - StringRef Value = A->getValue(); - unsigned Width LLVM_ATTRIBUTE_UNINITIALIZED; - - // Only "none" and Integer values are accepted by - // -mprefer-vector-width=. - if (Value != "none" && Value.getAsInteger(10, Width)) { - Diags.Report(clang::diag::err_drv_invalid_value) - << A->getOption().getName() << Value; - return ""; - } - - return Value; -} - -// This is a helper function for validating the optional refinement step -// parameter in reciprocal argument strings. Return false if there is an error -// parsing the refinement step. Otherwise, return true and set the Position -// of the refinement step in the input string. -static bool getRefinementStep(StringRef In, clang::DiagnosticsEngine &Diags, - const Arg &A, size_t &Position) { - const char RefinementStepToken = ':'; - Position = In.find(RefinementStepToken); - if (Position != StringRef::npos) { - StringRef Option = A.getOption().getName(); - StringRef RefStep = In.substr(Position + 1); - // Allow exactly one numeric character for the additional refinement - // step parameter. This is reasonable for all currently-supported - // operations and architectures because we would expect that a larger value - // of refinement steps would cause the estimate "optimization" to - // under-perform the native operation. Also, if the estimate does not - // converge quickly, it probably will not ever converge, so further - // refinement steps will not produce a better answer. - if (RefStep.size() != 1) { - Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; - return false; - } - char RefStepChar = RefStep[0]; - if (RefStepChar < '0' || RefStepChar > '9') { - Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; - return false; - } - } - return true; -} - -// Parse -mrecip. 
Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef tools::parseMRecipOption(clang::DiagnosticsEngine &Diags, - const ArgList &Args) { - StringRef DisabledPrefixIn = "!"; - StringRef DisabledPrefixOut = "!"; - StringRef EnabledPrefixOut = ""; - StringRef Out = ""; - - Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ); - if (!A) - return ""; - - unsigned NumOptions = A->getNumValues(); - if (NumOptions == 0) { - // No option is the same as "all". - return "all"; - } - - // Pass through "all", "none", or "default" with an optional refinement step. - if (NumOptions == 1) { - StringRef Val = A->getValue(0); - size_t RefStepLoc; - if (!getRefinementStep(Val, Diags, *A, RefStepLoc)) - return ""; - StringRef ValBase = Val.slice(0, RefStepLoc); - if (ValBase == "all" || ValBase == "none" || ValBase == "default") { - return Val; - } - } - - // Each reciprocal type may be enabled or disabled individually. - // Check each input value for validity, concatenate them all back together, - // and pass through. 
- - llvm::StringMap OptionStrings; - OptionStrings.insert(std::make_pair("divd", false)); - OptionStrings.insert(std::make_pair("divf", false)); - OptionStrings.insert(std::make_pair("divh", false)); - OptionStrings.insert(std::make_pair("vec-divd", false)); - OptionStrings.insert(std::make_pair("vec-divf", false)); - OptionStrings.insert(std::make_pair("vec-divh", false)); - OptionStrings.insert(std::make_pair("sqrtd", false)); - OptionStrings.insert(std::make_pair("sqrtf", false)); - OptionStrings.insert(std::make_pair("sqrth", false)); - OptionStrings.insert(std::make_pair("vec-sqrtd", false)); - OptionStrings.insert(std::make_pair("vec-sqrtf", false)); - OptionStrings.insert(std::make_pair("vec-sqrth", false)); - - for (unsigned i = 0; i != NumOptions; ++i) { - StringRef Val = A->getValue(i); - - bool IsDisabled = Val.starts_with(DisabledPrefixIn); - // Ignore the disablement token for string matching. - if (IsDisabled) - Val = Val.substr(1); - - size_t RefStep; - if (!getRefinementStep(Val, Diags, *A, RefStep)) - return ""; - - StringRef ValBase = Val.slice(0, RefStep); - llvm::StringMap::iterator OptionIter = OptionStrings.find(ValBase); - if (OptionIter == OptionStrings.end()) { - // Try again specifying float suffix. - OptionIter = OptionStrings.find(ValBase.str() + 'f'); - if (OptionIter == OptionStrings.end()) { - // The input name did not match any known option string. - Diags.Report(diag::err_drv_unknown_argument) << Val; - return ""; - } - // The option was specified without a half or float or double suffix. - // Make sure that the double or half entry was not already specified. - // The float entry will be checked below. - if (OptionStrings[ValBase.str() + 'd'] || - OptionStrings[ValBase.str() + 'h']) { - Diags.Report(diag::err_drv_invalid_value) - << A->getOption().getName() << Val; - return ""; - } - } - - if (OptionIter->second == true) { - // Duplicate option specified. 
- Diags.Report(diag::err_drv_invalid_value) - << A->getOption().getName() << Val; - return ""; - } - - // Mark the matched option as found. Do not allow duplicate specifiers. - OptionIter->second = true; - - // If the precision was not specified, also mark the double and half entry - // as found. - if (ValBase.back() != 'f' && ValBase.back() != 'd' && - ValBase.back() != 'h') { - OptionStrings[ValBase.str() + 'd'] = true; - OptionStrings[ValBase.str() + 'h'] = true; - } - - // Build the output string. - StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut; - Out = Args.MakeArgString(Out + Prefix + Val); - if (i != NumOptions - 1) - Out = Args.MakeArgString(Out + ","); - } - - return Out; -} - std::string tools::complexRangeKindToStr(LangOptions::ComplexRangeKind Range) { switch (Range) { case LangOptions::ComplexRangeKind::CX_Full: diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index cc4755cd6a9b0..438de23be0103 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -11,6 +11,7 @@ #include "clang/Basic/CodeGenOptions.h" #include "clang/Driver/CommonArgs.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/Frontend/Debug/Options.h" #include "llvm/Support/Path.h" diff --git a/clang/lib/Driver/ToolChains/HIPAMD.cpp b/clang/lib/Driver/ToolChains/HIPAMD.cpp index 231a38c2d3717..f2f64922cb404 100644 --- a/clang/lib/Driver/ToolChains/HIPAMD.cpp +++ b/clang/lib/Driver/ToolChains/HIPAMD.cpp @@ -159,10 +159,9 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA, // For SPIR-V the inputs for the job are device AMDGCN SPIR-V flavoured bitcode // and the output is either a compiled SPIR-V binary or bitcode (-emit-llvm). It -// calls llvm-link and then the llvm-spirv translator. Once the SPIR-V BE will -// be promoted from experimental, we will switch to using that. 
TODO: consider -// if we want to run any targeted optimisations over IR here, over generic -// SPIR-V. +// calls llvm-link and then the llvm-spirv translator or the SPIR-V BE. +// TODO: consider if we want to run any targeted optimisations over IR here, +// over generic SPIR-V. void AMDGCN::Linker::constructLinkAndEmitSpirvCommand( Compilation &C, const JobAction &JA, const InputInfoList &Inputs, const InputInfo &Output, const llvm::opt::ArgList &Args) const { @@ -173,17 +172,41 @@ void AMDGCN::Linker::constructLinkAndEmitSpirvCommand( const char *LinkedBCFilePath = HIP::getTempFile(C, LinkedBCFilePrefix, "bc"); InputInfo LinkedBCFile(&JA, LinkedBCFilePath, Output.getBaseInput()); + bool UseSPIRVBackend = + Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, /*Default=*/false); + constructLlvmLinkCommand(C, JA, Inputs, LinkedBCFile, Args); - // Emit SPIR-V binary. - llvm::opt::ArgStringList TrArgs{ - "--spirv-max-version=1.6", - "--spirv-ext=+all", - "--spirv-allow-unknown-intrinsics", - "--spirv-lower-const-expr", - "--spirv-preserve-auxdata", - "--spirv-debug-info-version=nonsemantic-shader-200"}; - SPIRV::constructTranslateCommand(C, *this, JA, Output, LinkedBCFile, TrArgs); + if (UseSPIRVBackend) { + // This code handles the case in the new driver when --offload-device-only + // is unset and clang-linker-wrapper forwards the bitcode that must be + // compiled to SPIR-V. 
+ + llvm::opt::ArgStringList CmdArgs; + const char *Triple = + C.getArgs().MakeArgString("-triple=spirv64-amd-amdhsa"); + + CmdArgs.append({"-cc1", Triple, "-emit-obj", "-disable-llvm-optzns", + LinkedBCFile.getFilename(), "-o", Output.getFilename()}); + + const Driver &Driver = getToolChain().getDriver(); + const char *Exec = Driver.getClangProgramPath(); + C.addCommand(std::make_unique( + JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, LinkedBCFile, + Output, Driver.getPrependArg())); + } else { + // Emit SPIR-V binary using the translator + llvm::opt::ArgStringList TrArgs{ + "--spirv-max-version=1.6", + "--spirv-ext=+all", + "--spirv-allow-unknown-intrinsics", + "--spirv-lower-const-expr", + "--spirv-preserve-auxdata", + "--spirv-debug-info-version=nonsemantic-shader-200"}; + SPIRV::constructTranslateCommand(C, *this, JA, Output, LinkedBCFile, + TrArgs); + } } // For amdgcn the inputs of the linker job are device bitcode and output is diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp index 020e7465548fe..2c741a38fce1a 100644 --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -927,7 +927,7 @@ SanitizerMask Linux::getSupportedSanitizers() const { if (IsX86_64 || IsSystemZ || IsPowerPC64) Res |= SanitizerKind::KernelMemory; if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch || - IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64) + IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64 || IsSystemZ) Res |= SanitizerKind::Scudo; if (IsX86_64 || IsAArch64 || IsRISCV64) { Res |= SanitizerKind::HWAddress; diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index 9bbb33cb14502..f0e9aff2fd21a 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -405,11 +405,19 @@ struct ScalarEnumerationTraits { template <> struct MappingTraits { static void mapping(IO &IO, FormatStyle::IntegerLiteralSeparatorStyle &Base) { 
IO.mapOptional("Binary", Base.Binary); - IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigits); + IO.mapOptional("BinaryMinDigitsInsert", Base.BinaryMinDigitsInsert); + IO.mapOptional("BinaryMaxDigitsRemove", Base.BinaryMaxDigitsRemove); IO.mapOptional("Decimal", Base.Decimal); - IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigits); + IO.mapOptional("DecimalMinDigitsInsert", Base.DecimalMinDigitsInsert); + IO.mapOptional("DecimalMaxDigitsRemove", Base.DecimalMaxDigitsRemove); IO.mapOptional("Hex", Base.Hex); - IO.mapOptional("HexMinDigits", Base.HexMinDigits); + IO.mapOptional("HexMinDigitsInsert", Base.HexMinDigitsInsert); + IO.mapOptional("HexMaxDigitsRemove", Base.HexMaxDigitsRemove); + + // For backward compatibility. + IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigitsInsert); + IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigitsInsert); + IO.mapOptional("HexMinDigits", Base.HexMinDigitsInsert); } }; @@ -1758,10 +1766,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) { LLVMStyle.InsertBraces = false; LLVMStyle.InsertNewlineAtEOF = false; LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None; - LLVMStyle.IntegerLiteralSeparator = { - /*Binary=*/0, /*BinaryMinDigits=*/0, - /*Decimal=*/0, /*DecimalMinDigits=*/0, - /*Hex=*/0, /*HexMinDigits=*/0}; + LLVMStyle.IntegerLiteralSeparator = {}; LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave; LLVMStyle.JavaScriptWrapImports = true; LLVMStyle.KeepEmptyLines = { @@ -2183,7 +2188,7 @@ FormatStyle getClangFormatStyle() { Style.InsertBraces = true; Style.InsertNewlineAtEOF = true; Style.IntegerLiteralSeparator.Decimal = 3; - Style.IntegerLiteralSeparator.DecimalMinDigits = 5; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 5; Style.LineEnding = FormatStyle::LE_LF; Style.RemoveBracesLLVM = true; Style.RemoveEmptyLinesInUnwrappedLines = true; diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp index 
b51991bfeff4b..a283884b6c341 100644 --- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp +++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp @@ -72,11 +72,22 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env, if (SkipBinary && SkipDecimal && SkipHex) return {}; - const auto BinaryMinDigits = - std::max((int)Option.BinaryMinDigits, Binary + 1); - const auto DecimalMinDigits = - std::max((int)Option.DecimalMinDigits, Decimal + 1); - const auto HexMinDigits = std::max((int)Option.HexMinDigits, Hex + 1); + auto CalcMinAndMax = [](int DigitsPerGroup, int MinDigitsInsert, + int MaxDigitsRemove) { + MinDigitsInsert = std::max(MinDigitsInsert, DigitsPerGroup + 1); + if (MinDigitsInsert < 1) + MaxDigitsRemove = 0; + else if (MaxDigitsRemove < 1 || MaxDigitsRemove >= MinDigitsInsert) + MaxDigitsRemove = MinDigitsInsert - 1; + return std::pair(MinDigitsInsert, MaxDigitsRemove); + }; + + const auto [BinaryMinDigitsInsert, BinaryMaxDigitsRemove] = CalcMinAndMax( + Binary, Option.BinaryMinDigitsInsert, Option.BinaryMaxDigitsRemove); + const auto [DecimalMinDigitsInsert, DecimalMaxDigitsRemove] = CalcMinAndMax( + Decimal, Option.DecimalMinDigitsInsert, Option.DecimalMaxDigitsRemove); + const auto [HexMinDigitsInsert, HexMaxDigitsRemove] = + CalcMinAndMax(Hex, Option.HexMinDigitsInsert, Option.HexMaxDigitsRemove); const auto &SourceMgr = Env.getSourceManager(); AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges()); @@ -138,17 +149,23 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env, Text = Text.substr(Start, Length); } auto DigitsPerGroup = Decimal; - auto MinDigits = DecimalMinDigits; + auto MinDigitsInsert = DecimalMinDigitsInsert; + auto MaxDigitsRemove = DecimalMaxDigitsRemove; if (IsBase2) { DigitsPerGroup = Binary; - MinDigits = BinaryMinDigits; + MinDigitsInsert = BinaryMinDigitsInsert; + MaxDigitsRemove = BinaryMaxDigitsRemove; } else if (IsBase16) { DigitsPerGroup = Hex; - MinDigits = HexMinDigits; + MinDigitsInsert = 
HexMinDigitsInsert; + MaxDigitsRemove = HexMaxDigitsRemove; } const auto SeparatorCount = Text.count(Separator); const int DigitCount = Length - SeparatorCount; - const bool RemoveSeparator = DigitsPerGroup < 0 || DigitCount < MinDigits; + if (DigitCount > MaxDigitsRemove && DigitCount < MinDigitsInsert) + continue; + const bool RemoveSeparator = + DigitsPerGroup < 0 || DigitCount <= MaxDigitsRemove; if (RemoveSeparator && SeparatorCount == 0) continue; if (!RemoveSeparator && SeparatorCount > 0 && diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 50edca43ebb92..19c83d3910902 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -2363,12 +2363,9 @@ bool UnwrappedLineParser::tryToParseLambda() { Arrow = FormatTok; nextToken(); break; - case tok::kw_requires: { - auto *RequiresToken = FormatTok; - nextToken(); - parseRequiresClause(RequiresToken); + case tok::kw_requires: + parseRequiresClause(); break; - } case tok::equal: if (!InTemplateParameterList) return true; @@ -2580,12 +2577,9 @@ bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) { if (IsEnum && !Style.AllowShortEnumsOnASingleLine) addUnwrappedLine(); break; - case tok::kw_requires: { - auto *RequiresToken = FormatTok; - nextToken(); - parseRequiresExpression(RequiresToken); + case tok::kw_requires: + parseRequiresExpression(); break; - } default: nextToken(); break; @@ -2727,12 +2721,9 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType, else nextToken(); break; - case tok::kw_requires: { - auto RequiresToken = FormatTok; - nextToken(); - parseRequiresExpression(RequiresToken); + case tok::kw_requires: + parseRequiresExpression(); break; - } case tok::ampamp: if (AmpAmpTokenType != TT_Unknown) FormatTok->setFinalizedType(AmpAmpTokenType); @@ -3467,23 +3458,20 @@ void UnwrappedLineParser::parseAccessSpecifier() { /// \returns true if it parsed a clause. 
bool UnwrappedLineParser::parseRequires(bool SeenEqual) { assert(FormatTok->is(tok::kw_requires) && "'requires' expected"); - auto RequiresToken = FormatTok; // We try to guess if it is a requires clause, or a requires expression. For - // that we first consume the keyword and check the next token. - nextToken(); - - switch (FormatTok->Tok.getKind()) { + // that we first check the next token. + switch (Tokens->peekNextToken(/*SkipComment=*/true)->Tok.getKind()) { case tok::l_brace: // This can only be an expression, never a clause. - parseRequiresExpression(RequiresToken); + parseRequiresExpression(); return false; case tok::l_paren: // Clauses and expression can start with a paren, it's unclear what we have. break; default: // All other tokens can only be a clause. - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; } @@ -3494,13 +3482,13 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { // requires (C && ... // But first let's look behind. - auto *PreviousNonComment = RequiresToken->getPreviousNonComment(); + auto *PreviousNonComment = FormatTok->getPreviousNonComment(); if (!PreviousNonComment || PreviousNonComment->is(TT_RequiresExpressionLBrace)) { // If there is no token, or an expression left brace, we are a requires // clause within a requires expression. - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; } @@ -3512,7 +3500,7 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { case tok::star: case tok::amp: // This is a requires clause. - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; case tok::ampamp: { // This can be either: @@ -3523,7 +3511,7 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { // void member(...) const && requires (C ... 
auto PrevPrev = PreviousNonComment->getPreviousNonComment(); if ((PrevPrev && PrevPrev->is(tok::kw_const)) || !SeenEqual) { - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; } break; @@ -3531,11 +3519,11 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { default: if (PreviousNonComment->isTypeOrIdentifier(LangOpts)) { // This is a requires clause. - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; } // It's an expression. - parseRequiresExpression(RequiresToken); + parseRequiresExpression(); return false; } @@ -3564,7 +3552,7 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { case tok::comma: if (OpenAngles == 0) { FormatTok = Tokens->setPosition(StoredPosition); - parseRequiresExpression(RequiresToken); + parseRequiresExpression(); return false; } break; @@ -3579,7 +3567,7 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { case tok::identifier: if (FoundType && !LastWasColonColon && OpenAngles == 0) { FormatTok = Tokens->setPosition(StoredPosition); - parseRequiresExpression(RequiresToken); + parseRequiresExpression(); return false; } FoundType = true; @@ -3594,7 +3582,7 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { default: if (NextToken->isTypeName(LangOpts)) { FormatTok = Tokens->setPosition(StoredPosition); - parseRequiresExpression(RequiresToken); + parseRequiresExpression(); return false; } break; @@ -3602,31 +3590,29 @@ bool UnwrappedLineParser::parseRequires(bool SeenEqual) { } // This seems to be a complicated expression, just assume it's a clause. FormatTok = Tokens->setPosition(StoredPosition); - parseRequiresClause(RequiresToken); + parseRequiresClause(); return true; } /// Parses a requires clause. -/// \param RequiresToken The requires keyword token, which starts this clause. -/// \pre We need to be on the next token after the requires keyword. 
/// \sa parseRequiresExpression /// /// Returns if it either has finished parsing the clause, or it detects, that /// the clause is incorrect. -void UnwrappedLineParser::parseRequiresClause(FormatToken *RequiresToken) { - assert(FormatTok->getPreviousNonComment() == RequiresToken); - assert(RequiresToken->is(tok::kw_requires) && "'requires' expected"); +void UnwrappedLineParser::parseRequiresClause() { + assert(FormatTok->is(tok::kw_requires) && "'requires' expected"); // If there is no previous token, we are within a requires expression, // otherwise we will always have the template or function declaration in front // of it. bool InRequiresExpression = - !RequiresToken->Previous || - RequiresToken->Previous->is(TT_RequiresExpressionLBrace); + !FormatTok->Previous || + FormatTok->Previous->is(TT_RequiresExpressionLBrace); - RequiresToken->setFinalizedType(InRequiresExpression - ? TT_RequiresClauseInARequiresExpression - : TT_RequiresClause); + FormatTok->setFinalizedType(InRequiresExpression + ? TT_RequiresClauseInARequiresExpression + : TT_RequiresClause); + nextToken(); // NOTE: parseConstraintExpression is only ever called from this function. // It could be inlined into here. @@ -3637,17 +3623,15 @@ void UnwrappedLineParser::parseRequiresClause(FormatToken *RequiresToken) { } /// Parses a requires expression. -/// \param RequiresToken The requires keyword token, which starts this clause. -/// \pre We need to be on the next token after the requires keyword. /// \sa parseRequiresClause /// /// Returns if it either has finished parsing the expression, or it detects, /// that the expression is incorrect. 
-void UnwrappedLineParser::parseRequiresExpression(FormatToken *RequiresToken) { - assert(FormatTok->getPreviousNonComment() == RequiresToken); - assert(RequiresToken->is(tok::kw_requires) && "'requires' expected"); +void UnwrappedLineParser::parseRequiresExpression() { + assert(FormatTok->is(tok::kw_requires) && "'requires' expected"); - RequiresToken->setFinalizedType(TT_RequiresExpression); + FormatTok->setFinalizedType(TT_RequiresExpression); + nextToken(); if (FormatTok->is(tok::l_paren)) { FormatTok->setFinalizedType(TT_RequiresExpressionLParen); @@ -3687,12 +3671,9 @@ void UnwrappedLineParser::parseConstraintExpression() { bool LambdaThisTimeAllowed = std::exchange(LambdaNextTimeAllowed, false); switch (FormatTok->Tok.getKind()) { - case tok::kw_requires: { - auto RequiresToken = FormatTok; - nextToken(); - parseRequiresExpression(RequiresToken); + case tok::kw_requires: + parseRequiresExpression(); break; - } case tok::l_paren: if (!TopLevelParensAllowed) diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h index 0161a5063ad40..86022d9b316c6 100644 --- a/clang/lib/Format/UnwrappedLineParser.h +++ b/clang/lib/Format/UnwrappedLineParser.h @@ -169,8 +169,8 @@ class UnwrappedLineParser { bool parseEnum(); bool parseStructLike(); bool parseRequires(bool SeenEqual); - void parseRequiresClause(FormatToken *RequiresToken); - void parseRequiresExpression(FormatToken *RequiresToken); + void parseRequiresClause(); + void parseRequiresExpression(); void parseConstraintExpression(); void parseCppExportBlock(); void parseNamespaceOrExportBlock(unsigned AddLevels); diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp index 1de779ccbf141..e72317da64596 100644 --- a/clang/lib/Frontend/ASTUnit.cpp +++ b/clang/lib/Frontend/ASTUnit.cpp @@ -44,6 +44,7 @@ #include "clang/Frontend/FrontendOptions.h" #include "clang/Frontend/MultiplexConsumer.h" #include "clang/Frontend/PrecompiledPreamble.h" +#include 
"clang/Frontend/StandaloneDiagnostic.h" #include "clang/Frontend/Utils.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/HeaderSearchOptions.h" @@ -210,15 +211,6 @@ getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation, return llvm::MemoryBuffer::getMemBufferCopy(Buffer->getBuffer(), FilePath); } -struct ASTUnit::ASTWriterData { - SmallString<128> Buffer; - llvm::BitstreamWriter Stream; - ASTWriter Writer; - - ASTWriterData(ModuleCache &ModCache, const CodeGenOptions &CGOpts) - : Stream(Buffer), Writer(Stream, Buffer, ModCache, CGOpts, {}) {} -}; - void ASTUnit::clearFileLevelDecls() { FileDecls.clear(); } @@ -581,73 +573,24 @@ class ASTInfoCollector : public ASTReaderListener { Counter = NewCounter; } }; +} // anonymous namespace -/// Diagnostic consumer that saves each diagnostic it is given. -class FilterAndStoreDiagnosticConsumer : public DiagnosticConsumer { - SmallVectorImpl *StoredDiags; - SmallVectorImpl *StandaloneDiags; - bool CaptureNonErrorsFromIncludes = true; - const LangOptions *LangOpts = nullptr; - SourceManager *SourceMgr = nullptr; - -public: - FilterAndStoreDiagnosticConsumer( - SmallVectorImpl *StoredDiags, - SmallVectorImpl *StandaloneDiags, - bool CaptureNonErrorsFromIncludes) - : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags), - CaptureNonErrorsFromIncludes(CaptureNonErrorsFromIncludes) { - assert((StoredDiags || StandaloneDiags) && - "No output collections were passed to StoredDiagnosticConsumer."); - } - - void BeginSourceFile(const LangOptions &LangOpts, - const Preprocessor *PP = nullptr) override { - this->LangOpts = &LangOpts; - if (PP) - SourceMgr = &PP->getSourceManager(); - } - - void HandleDiagnostic(DiagnosticsEngine::Level Level, - const Diagnostic &Info) override; -}; - -/// RAII object that optionally captures and filters diagnostics, if -/// there is no diagnostic client to capture them already. 
-class CaptureDroppedDiagnostics { - DiagnosticsEngine &Diags; - FilterAndStoreDiagnosticConsumer Client; - DiagnosticConsumer *PreviousClient = nullptr; - std::unique_ptr OwningPreviousClient; - -public: - CaptureDroppedDiagnostics( - CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, - SmallVectorImpl *StoredDiags, - SmallVectorImpl *StandaloneDiags) - : Diags(Diags), - Client(StoredDiags, StandaloneDiags, - CaptureDiagnostics != - CaptureDiagsKind::AllWithoutNonErrorsFromIncludes) { - if (CaptureDiagnostics != CaptureDiagsKind::None || - Diags.getClient() == nullptr) { - OwningPreviousClient = Diags.takeClient(); - PreviousClient = Diags.getClient(); - Diags.setClient(&Client, false); - } - } - - ~CaptureDroppedDiagnostics() { - if (Diags.getClient() == &Client) - Diags.setClient(PreviousClient, !!OwningPreviousClient.release()); - } -}; - -} // namespace +FilterAndStoreDiagnosticConsumer::FilterAndStoreDiagnosticConsumer( + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags, + bool CaptureNonErrorsFromIncludes) + : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags), + CaptureNonErrorsFromIncludes(CaptureNonErrorsFromIncludes) { + assert((StoredDiags || StandaloneDiags) && + "No output collections were passed to StoredDiagnosticConsumer."); +} -static ASTUnit::StandaloneDiagnostic -makeStandaloneDiagnostic(const LangOptions &LangOpts, - const StoredDiagnostic &InDiag); +void FilterAndStoreDiagnosticConsumer::BeginSourceFile( + const LangOptions &LangOpts, const Preprocessor *PP) { + this->LangOpts = &LangOpts; + if (PP) + SourceMgr = &PP->getSourceManager(); +} static bool isInMainFile(const clang::Diagnostic &D) { if (!D.hasSourceManager() || !D.getLocation().isValid()) @@ -683,12 +626,32 @@ void FilterAndStoreDiagnosticConsumer::HandleDiagnostic( StoredDiag.emplace(Level, Info); ResultDiag = &*StoredDiag; } - StandaloneDiags->push_back( - makeStandaloneDiagnostic(*LangOpts, *ResultDiag)); + 
StandaloneDiags->emplace_back(*LangOpts, *ResultDiag); } } } +CaptureDroppedDiagnostics::CaptureDroppedDiagnostics( + CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags) + : Diags(Diags), + Client(StoredDiags, StandaloneDiags, + CaptureDiagnostics != + CaptureDiagsKind::AllWithoutNonErrorsFromIncludes) { + if (CaptureDiagnostics != CaptureDiagsKind::None || + Diags.getClient() == nullptr) { + OwningPreviousClient = Diags.takeClient(); + PreviousClient = Diags.getClient(); + Diags.setClient(&Client, false); + } +} + +CaptureDroppedDiagnostics::~CaptureDroppedDiagnostics() { + if (Diags.getClient() == &Client) + Diags.setClient(PreviousClient, !!OwningPreviousClient.release()); +} + IntrusiveRefCntPtr ASTUnit::getASTReader() const { return Reader; } @@ -1110,7 +1073,7 @@ class ASTUnitPreambleCallbacks : public PreambleCallbacks { unsigned Hash = 0; std::vector TopLevelDecls; std::vector TopLevelDeclIDs; - llvm::SmallVector PreambleDiags; + llvm::SmallVector PreambleDiags; }; } // namespace @@ -1259,10 +1222,17 @@ bool ASTUnit::Parse(std::shared_ptr PCHContainerOps, if (!Act->BeginSourceFile(*Clang, Clang->getFrontendOpts().Inputs[0])) return true; - if (SavedMainFileBuffer) - TranslateStoredDiagnostics(getFileManager(), getSourceManager(), - PreambleDiagnostics, StoredDiagnostics); - else + if (SavedMainFileBuffer) { + StoredDiagnostics.clear(); + StoredDiagnostics.reserve(PreambleDiagnostics.size()); + llvm::transform(std::move(PreambleDiagnostics), + std::back_inserter(StoredDiagnostics), + [&](auto &&StandaloneDiag) { + return translateStandaloneDiag( + getFileManager(), getSourceManager(), + std::move(StandaloneDiag), PreambleSrcLocCache); + }); + } else PreambleSrcLocCache.clear(); if (llvm::Error Err = Act->Execute()) { @@ -1281,51 +1251,6 @@ bool ASTUnit::Parse(std::shared_ptr PCHContainerOps, return false; } -static std::pair -makeStandaloneRange(CharSourceRange Range, const 
SourceManager &SM, - const LangOptions &LangOpts) { - CharSourceRange FileRange = Lexer::makeFileCharRange(Range, SM, LangOpts); - unsigned Offset = SM.getFileOffset(FileRange.getBegin()); - unsigned EndOffset = SM.getFileOffset(FileRange.getEnd()); - return std::make_pair(Offset, EndOffset); -} - -static ASTUnit::StandaloneFixIt makeStandaloneFixIt(const SourceManager &SM, - const LangOptions &LangOpts, - const FixItHint &InFix) { - ASTUnit::StandaloneFixIt OutFix; - OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts); - OutFix.InsertFromRange = - makeStandaloneRange(InFix.InsertFromRange, SM, LangOpts); - OutFix.CodeToInsert = InFix.CodeToInsert; - OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions; - return OutFix; -} - -static ASTUnit::StandaloneDiagnostic -makeStandaloneDiagnostic(const LangOptions &LangOpts, - const StoredDiagnostic &InDiag) { - ASTUnit::StandaloneDiagnostic OutDiag; - OutDiag.ID = InDiag.getID(); - OutDiag.Level = InDiag.getLevel(); - OutDiag.Message = std::string(InDiag.getMessage()); - OutDiag.LocOffset = 0; - if (InDiag.getLocation().isInvalid()) - return OutDiag; - const SourceManager &SM = InDiag.getLocation().getManager(); - SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation()); - OutDiag.Filename = std::string(SM.getFilename(FileLoc)); - if (OutDiag.Filename.empty()) - return OutDiag; - OutDiag.LocOffset = SM.getFileOffset(FileLoc); - for (const auto &Range : InDiag.getRanges()) - OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts)); - for (const auto &FixIt : InDiag.getFixIts()) - OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt)); - - return OutDiag; -} - /// Attempt to build or re-use a precompiled preamble when (re-)parsing /// the source file. 
/// @@ -1780,114 +1705,6 @@ std::unique_ptr ASTUnit::LoadFromCompilerInvocation( return AST; } -std::unique_ptr ASTUnit::LoadFromCommandLine( - const char **ArgBegin, const char **ArgEnd, - std::shared_ptr PCHContainerOps, - std::shared_ptr DiagOpts, - IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, - bool StorePreamblesInMemory, StringRef PreambleStoragePath, - bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, - ArrayRef RemappedFiles, bool RemappedFilesKeepOriginalName, - unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind, - bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion, - bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies, - bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization, - bool RetainExcludedConditionalBlocks, std::optional ModuleFormat, - std::unique_ptr *ErrAST, - IntrusiveRefCntPtr VFS) { - assert(Diags.get() && "no DiagnosticsEngine was provided"); - - // If no VFS was provided, create one that tracks the physical file system. - // If '-working-directory' was passed as an argument, 'createInvocation' will - // set this as the current working directory of the VFS. - if (!VFS) - VFS = llvm::vfs::createPhysicalFileSystem(); - - SmallVector StoredDiagnostics; - - std::shared_ptr CI; - - { - CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags, - &StoredDiagnostics, nullptr); - - CreateInvocationOptions CIOpts; - CIOpts.VFS = VFS; - CIOpts.Diags = Diags; - CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed? 
- CI = createInvocation(llvm::ArrayRef(ArgBegin, ArgEnd), std::move(CIOpts)); - if (!CI) - return nullptr; - } - - // Override any files that need remapping - for (const auto &RemappedFile : RemappedFiles) { - CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first, - RemappedFile.second); - } - PreprocessorOptions &PPOpts = CI->getPreprocessorOpts(); - PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName; - PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors; - PPOpts.SingleFileParseMode = SingleFileParse; - PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks; - - // Override the resources path. - CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath); - - CI->getFrontendOpts().SkipFunctionBodies = - SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile; - - if (ModuleFormat) - CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat); - - // Create the AST unit. - std::unique_ptr AST; - AST.reset(new ASTUnit(false)); - AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size(); - AST->StoredDiagnostics.swap(StoredDiagnostics); - ConfigureDiags(Diags, *AST, CaptureDiagnostics); - AST->DiagOpts = DiagOpts; - AST->Diagnostics = Diags; - AST->FileSystemOpts = CI->getFileSystemOpts(); - AST->CodeGenOpts = std::make_unique(CI->getCodeGenOpts()); - VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS); - AST->FileMgr = - llvm::makeIntrusiveRefCnt(AST->FileSystemOpts, VFS); - AST->StorePreamblesInMemory = StorePreamblesInMemory; - AST->PreambleStoragePath = PreambleStoragePath; - AST->ModCache = createCrossProcessModuleCache(); - AST->OnlyLocalDecls = OnlyLocalDecls; - AST->CaptureDiagnostics = CaptureDiagnostics; - AST->TUKind = TUKind; - AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults; - AST->IncludeBriefCommentsInCodeCompletion = - IncludeBriefCommentsInCodeCompletion; - AST->UserFilesAreVolatile = UserFilesAreVolatile; - AST->Invocation = CI; - 
AST->SkipFunctionBodies = SkipFunctionBodies; - if (ForSerialization) - AST->WriterData.reset(new ASTWriterData(*AST->ModCache, *AST->CodeGenOpts)); - // Zero out now to ease cleanup during crash recovery. - CI = nullptr; - Diags = nullptr; - - // Recover resources if we crash before exiting this method. - llvm::CrashRecoveryContextCleanupRegistrar ASTUnitCleanup(AST.get()); - - if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps), - PrecompilePreambleAfterNParses, VFS)) { - // Some error occurred, if caller wants to examine diagnostics, pass it the - // ASTUnit. - if (ErrAST) { - AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics); - ErrAST->swap(AST); - } - return nullptr; - } - - return AST; -} - bool ASTUnit::Reparse(std::shared_ptr PCHContainerOps, ArrayRef RemappedFiles, IntrusiveRefCntPtr VFS) { @@ -2406,64 +2223,6 @@ bool ASTUnit::serialize(raw_ostream &OS) { return serializeUnit(Writer, Buffer, getSema(), OS); } -void ASTUnit::TranslateStoredDiagnostics( - FileManager &FileMgr, SourceManager &SrcMgr, - const SmallVectorImpl &Diags, - SmallVectorImpl &Out) { - // Map the standalone diagnostic into the new source manager. We also need to - // remap all the locations to the new view. This includes the diag location, - // any associated source ranges, and the source ranges of associated fix-its. - // FIXME: There should be a cleaner way to do this. - SmallVector Result; - Result.reserve(Diags.size()); - - for (const auto &SD : Diags) { - // Rebuild the StoredDiagnostic. 
- if (SD.Filename.empty()) - continue; - auto FE = FileMgr.getOptionalFileRef(SD.Filename); - if (!FE) - continue; - SourceLocation FileLoc; - auto ItFileID = PreambleSrcLocCache.find(SD.Filename); - if (ItFileID == PreambleSrcLocCache.end()) { - FileID FID = SrcMgr.translateFile(*FE); - FileLoc = SrcMgr.getLocForStartOfFile(FID); - PreambleSrcLocCache[SD.Filename] = FileLoc; - } else { - FileLoc = ItFileID->getValue(); - } - - if (FileLoc.isInvalid()) - continue; - SourceLocation L = FileLoc.getLocWithOffset(SD.LocOffset); - FullSourceLoc Loc(L, SrcMgr); - - SmallVector Ranges; - Ranges.reserve(SD.Ranges.size()); - for (const auto &Range : SD.Ranges) { - SourceLocation BL = FileLoc.getLocWithOffset(Range.first); - SourceLocation EL = FileLoc.getLocWithOffset(Range.second); - Ranges.push_back(CharSourceRange::getCharRange(BL, EL)); - } - - SmallVector FixIts; - FixIts.reserve(SD.FixIts.size()); - for (const auto &FixIt : SD.FixIts) { - FixIts.push_back(FixItHint()); - FixItHint &FH = FixIts.back(); - FH.CodeToInsert = FixIt.CodeToInsert; - SourceLocation BL = FileLoc.getLocWithOffset(FixIt.RemoveRange.first); - SourceLocation EL = FileLoc.getLocWithOffset(FixIt.RemoveRange.second); - FH.RemoveRange = CharSourceRange::getCharRange(BL, EL); - } - - Result.push_back( - StoredDiagnostic(SD.Level, SD.ID, SD.Message, Loc, Ranges, FixIts)); - } - Result.swap(Out); -} - void ASTUnit::addFileLevelDecl(Decl *D) { assert(D); diff --git a/clang/lib/Frontend/CMakeLists.txt b/clang/lib/Frontend/CMakeLists.txt index dac9e0d26f393..634f239933605 100644 --- a/clang/lib/Frontend/CMakeLists.txt +++ b/clang/lib/Frontend/CMakeLists.txt @@ -17,7 +17,6 @@ add_clang_library(clangFrontend ChainedIncludesSource.cpp CompilerInstance.cpp CompilerInvocation.cpp - CreateInvocationFromCommandLine.cpp DependencyFile.cpp DependencyGraph.cpp DiagnosticRenderer.cpp @@ -36,6 +35,7 @@ add_clang_library(clangFrontend SARIFDiagnosticPrinter.cpp SerializedDiagnosticPrinter.cpp 
SerializedDiagnosticReader.cpp + StandaloneDiagnostic.cpp TestModuleFileExtension.cpp TextDiagnostic.cpp TextDiagnosticBuffer.cpp @@ -51,7 +51,6 @@ add_clang_library(clangFrontend clangAPINotes clangAST clangBasic - clangDriver clangOptions clangEdit clangLex diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index c7c29a91721c0..d2a5ed6262de1 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -27,7 +27,6 @@ #include "clang/Basic/Version.h" #include "clang/Basic/XRayInstr.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" #include "clang/Frontend/CommandLineSourceLoc.h" #include "clang/Frontend/DependencyOutputOptions.h" #include "clang/Frontend/FrontendOptions.h" @@ -3273,13 +3272,6 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, return Diags.getNumErrors() == NumErrorsBefore; } -std::string CompilerInvocation::GetResourcesPath(const char *Argv0, - void *MainAddr) { - std::string ClangExecutable = - llvm::sys::fs::getMainExecutable(Argv0, MainAddr); - return driver::Driver::GetResourcesPath(ClangExecutable); -} - static void GenerateHeaderSearchArgs(const HeaderSearchOptions &Opts, ArgumentConsumer Consumer) { const HeaderSearchOptions *HeaderSearchOpts = &Opts; @@ -3956,21 +3948,7 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts, std::to_string(*Opts.AllocTokenMax)); if (Opts.AllocTokenMode) { - StringRef S; - switch (*Opts.AllocTokenMode) { - case llvm::AllocTokenMode::Increment: - S = "increment"; - break; - case llvm::AllocTokenMode::Random: - S = "random"; - break; - case llvm::AllocTokenMode::TypeHash: - S = "typehash"; - break; - case llvm::AllocTokenMode::TypeHashPointerSplit: - S = "typehashpointersplit"; - break; - } + StringRef S = llvm::getAllocTokenModeAsString(*Opts.AllocTokenMode); GenerateArg(Consumer, OPT_falloc_token_mode_EQ, S); } } diff --git 
a/clang/lib/Frontend/StandaloneDiagnostic.cpp b/clang/lib/Frontend/StandaloneDiagnostic.cpp new file mode 100644 index 0000000000000..4f19c91b7d266 --- /dev/null +++ b/clang/lib/Frontend/StandaloneDiagnostic.cpp @@ -0,0 +1,117 @@ +//===--- StandaloneDiagnostic.cpp - Serializable Diagnostic --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/Frontend/StandaloneDiagnostic.h" +#include "clang/Lex/Lexer.h" + +namespace clang { + +StandaloneDiagnostic::SourceOffsetRange::SourceOffsetRange( + CharSourceRange Range, const SourceManager &SrcMgr, + const LangOptions &LangOpts) { + const auto FileRange = Lexer::makeFileCharRange(Range, SrcMgr, LangOpts); + Begin = SrcMgr.getFileOffset(FileRange.getBegin()); + End = SrcMgr.getFileOffset(FileRange.getEnd()); +} + +StandaloneDiagnostic::StandaloneFixIt::StandaloneFixIt( + const SourceManager &SrcMgr, const LangOptions &LangOpts, + const FixItHint &FixIt) + : RemoveRange(FixIt.RemoveRange, SrcMgr, LangOpts), + InsertFromRange(FixIt.InsertFromRange, SrcMgr, LangOpts), + CodeToInsert(FixIt.CodeToInsert), + BeforePreviousInsertions(FixIt.BeforePreviousInsertions) {} + +StandaloneDiagnostic::StandaloneDiagnostic(const LangOptions &LangOpts, + const StoredDiagnostic &InDiag) + : Level(InDiag.getLevel()), ID(InDiag.getID()), + Message(InDiag.getMessage()) { + const FullSourceLoc &FullLoc = InDiag.getLocation(); + // This is not an invalid diagnostic; invalid SourceLocations are used to + // represent diagnostics without a specific SourceLocation.
+ if (FullLoc.isInvalid()) + return; + + const auto &SrcMgr = FullLoc.getManager(); + FileKind = SrcMgr.getFileCharacteristic(static_cast(FullLoc)); + const auto FileLoc = SrcMgr.getFileLoc(static_cast(FullLoc)); + FileOffset = SrcMgr.getFileOffset(FileLoc); + Filename = SrcMgr.getFilename(FileLoc); + assert(!Filename.empty() && "diagnostic with location has no source file?"); + + Ranges.reserve(InDiag.getRanges().size()); + for (const auto &Range : InDiag.getRanges()) + Ranges.emplace_back(Range, SrcMgr, LangOpts); + + FixIts.reserve(InDiag.getFixIts().size()); + for (const auto &FixIt : InDiag.getFixIts()) + FixIts.emplace_back(SrcMgr, LangOpts, FixIt); +} + +StoredDiagnostic +translateStandaloneDiag(FileManager &FileMgr, SourceManager &SrcMgr, + const StandaloneDiagnostic &StandaloneDiag, + llvm::StringMap &SrcLocCache) { + const auto FileRef = FileMgr.getOptionalFileRef(StandaloneDiag.Filename); + if (!FileRef) + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message); + + // Try to get FileLoc from cache first + SourceLocation FileLoc; + auto It = SrcLocCache.find(StandaloneDiag.Filename); + if (It != SrcLocCache.end()) { + FileLoc = It->getValue(); + } + + // Cache miss - compute and cache the location + if (FileLoc.isInvalid()) { + const auto FileID = + SrcMgr.getOrCreateFileID(*FileRef, StandaloneDiag.FileKind); + FileLoc = SrcMgr.getLocForStartOfFile(FileID); + + if (FileLoc.isInvalid()) + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message); + + SrcLocCache[StandaloneDiag.Filename] = FileLoc; + } + + const auto DiagLoc = FileLoc.getLocWithOffset(StandaloneDiag.FileOffset); + const FullSourceLoc Loc(DiagLoc, SrcMgr); + + auto ConvertOffsetRange = + [&](const StandaloneDiagnostic::SourceOffsetRange &Range) { + return CharSourceRange( + SourceRange(FileLoc.getLocWithOffset(Range.Begin), + FileLoc.getLocWithOffset(Range.End)), + /*IsTokenRange*/ false); + }; + + SmallVector 
TranslatedRanges; + TranslatedRanges.reserve(StandaloneDiag.Ranges.size()); + transform(StandaloneDiag.Ranges, std::back_inserter(TranslatedRanges), + ConvertOffsetRange); + + SmallVector TranslatedFixIts; + TranslatedFixIts.reserve(StandaloneDiag.FixIts.size()); + for (const auto &FixIt : StandaloneDiag.FixIts) { + FixItHint TranslatedFixIt; + TranslatedFixIt.CodeToInsert = FixIt.CodeToInsert; + TranslatedFixIt.RemoveRange = ConvertOffsetRange(FixIt.RemoveRange); + TranslatedFixIt.InsertFromRange = ConvertOffsetRange(FixIt.InsertFromRange); + TranslatedFixIt.BeforePreviousInsertions = FixIt.BeforePreviousInsertions; + TranslatedFixIts.push_back(std::move(TranslatedFixIt)); + } + + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message, Loc, TranslatedRanges, + TranslatedFixIts); +} + +} // namespace clang diff --git a/clang/lib/Headers/avx10_2_512bf16intrin.h b/clang/lib/Headers/avx10_2_512bf16intrin.h index 3201307af4731..3e9f27443ecce 100644 --- a/clang/lib/Headers/avx10_2_512bf16intrin.h +++ b/clang/lib/Headers/avx10_2_512bf16intrin.h @@ -429,7 +429,7 @@ _mm512_maskz_rsqrt_pbh(__mmask32 __U, __m512bh __A) { (__v32bf)_mm512_setzero_pbh(), (__mmask32)(__U))) static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_sqrt_pbh(__m512bh __A) { - return (__m512bh)__builtin_ia32_vsqrtbf16512((__v32bf)__A); + return __builtin_elementwise_sqrt(__A); } static __inline__ __m512bh __DEFAULT_FN_ATTRS512 diff --git a/clang/lib/Headers/avx10_2bf16intrin.h b/clang/lib/Headers/avx10_2bf16intrin.h index 3df6930f94be3..179ec534025c2 100644 --- a/clang/lib/Headers/avx10_2bf16intrin.h +++ b/clang/lib/Headers/avx10_2bf16intrin.h @@ -826,7 +826,7 @@ _mm_maskz_rsqrt_pbh(__mmask8 __U, __m128bh __A) { (__v8bf)_mm_setzero_pbh(), (__mmask8)(__U))) static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_sqrt_pbh(__m256bh __A) { - return (__m256bh)__builtin_ia32_vsqrtbf16256((__v16bf)__A); + return __builtin_elementwise_sqrt(__A); } static __inline__ __m256bh 
__DEFAULT_FN_ATTRS256 @@ -843,7 +843,7 @@ _mm256_maskz_sqrt_pbh(__mmask16 __U, __m256bh __A) { } static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_sqrt_pbh(__m128bh __A) { - return (__m128bh)__builtin_ia32_vsqrtbf16((__v8bf)__A); + return __builtin_elementwise_sqrt(__A); } static __inline__ __m128bh __DEFAULT_FN_ATTRS128 diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h index 3e3c13d8bd662..d3ceb2327ac62 100644 --- a/clang/lib/Headers/avx2intrin.h +++ b/clang/lib/Headers/avx2intrin.h @@ -2095,9 +2095,8 @@ _mm256_slli_epi16(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [16 x i16] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi16(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sll_epi16(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count); } @@ -2134,9 +2133,8 @@ _mm256_slli_epi32(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [8 x i32] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi32(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sll_epi32(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count); } @@ -2173,9 +2171,8 @@ _mm256_slli_epi64(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [4 x i64] containing the result. 
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi64(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sll_epi64(__m256i __a, __m128i __count) { return __builtin_ia32_psllq256((__v4di)__a, __count); } @@ -2214,9 +2211,8 @@ _mm256_srai_epi16(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [16 x i16] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sra_epi16(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sra_epi16(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count); } @@ -2255,9 +2251,8 @@ _mm256_srai_epi32(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [8 x i32] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sra_epi32(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sra_epi32(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count); } @@ -2336,9 +2331,8 @@ _mm256_srli_epi16(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [16 x i16] containing the result. 
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi16(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_srl_epi16(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count); } @@ -2375,9 +2369,8 @@ _mm256_srli_epi32(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [8 x i32] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi32(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_srl_epi32(__m256i __a, __m128i __count) { return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count); } @@ -2414,9 +2407,8 @@ _mm256_srli_epi64(__m256i __a, int __count) { /// A 128-bit vector of [2 x i64] whose lower element gives the unsigned /// shift count (in bits). The upper element is ignored. /// \returns A 256-bit vector of [4 x i64] containing the result. 
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi64(__m256i __a, __m128i __count) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_srl_epi64(__m256i __a, __m128i __count) { return __builtin_ia32_psrlq256((__v4di)__a, __count); } diff --git a/clang/lib/Headers/avx512bf16intrin.h b/clang/lib/Headers/avx512bf16intrin.h index 3973f0e389685..458d1f8b993ba 100644 --- a/clang/lib/Headers/avx512bf16intrin.h +++ b/clang/lib/Headers/avx512bf16intrin.h @@ -25,6 +25,14 @@ typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead"))); #define __DEFAULT_FN_ATTRS \ __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16"))) +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr +#else +#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#endif + /// Convert One BF16 Data to One Single Float Data. /// /// \headerfile @@ -35,8 +43,8 @@ typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead"))); /// A bfloat data. /// \returns A float data whose sign field and exponent field keep unchanged, /// and fraction field is extended to 23 bits. -static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bf16 __A) { - return __builtin_ia32_cvtsbf162ss_32(__A); +static __inline__ float __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsbh_ss(__bf16 __A) { + return (float)(__A); } /// Convert Two Packed Single Data to One Packed BF16 Data. @@ -235,9 +243,9 @@ _mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) { /// \param __A /// A 256-bit vector of [16 x bfloat]. 
/// \returns A 512-bit vector of [16 x float] come from conversion of __A -static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) { - return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32( - (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16)); +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_cvtpbh_ps(__m256bh __A) { + return (__m512) __builtin_convertvector(__A, __v16sf); } /// Convert Packed BF16 Data to Packed float Data using zeroing mask. @@ -250,10 +258,11 @@ static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) { /// \param __A /// A 256-bit vector of [16 x bfloat]. /// \returns A 512-bit vector of [16 x float] come from conversion of __A -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) { - return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32( - (__m512i)_mm512_maskz_cvtepi16_epi32((__mmask16)__U, (__m256i)__A), 16)); + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtpbh_ps(__A), + (__v16sf)_mm512_setzero_ps()); } /// Convert Packed BF16 Data to Packed float Data using merging mask. @@ -268,15 +277,16 @@ _mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) { /// \param __A /// A 256-bit vector of [16 x bfloat]. 
/// \returns A 512-bit vector of [16 x float] come from conversion of __A -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) { - return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32( - (__m512i)__S, (__mmask16)__U, - (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16)); + return (__m512)__builtin_ia32_selectps_512( + (__mmask16)__U, (__v16sf)_mm512_cvtpbh_ps(__A), (__v16sf)__S); } #undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_CONSTEXPR #undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS512_CONSTEXPR #endif #endif diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h index 53d641e9e2eae..67e8461560b04 100644 --- a/clang/lib/Headers/avx512bwintrin.h +++ b/clang/lib/Headers/avx512bwintrin.h @@ -1383,23 +1383,20 @@ _mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sll_epi16(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sll_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_sll_epi16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_sll_epi16(__A, __B), 
(__v32hi)_mm512_setzero_si512()); @@ -1473,23 +1470,20 @@ _mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sra_epi16(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sra_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_sra_epi16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_sra_epi16(__A, __B), (__v32hi)_mm512_setzero_si512()); @@ -1515,23 +1509,20 @@ _mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B) { (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_srl_epi16(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_srl_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_srl_epi16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 -_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_srl_epi16(__A, __B), (__v32hi)_mm512_setzero_si512()); diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h index e1de56069870b..806a13c414c10 100644 --- a/clang/lib/Headers/avx512fintrin.h +++ b/clang/lib/Headers/avx512fintrin.h @@ -5382,45 +5382,39 @@ _mm512_kmov (__mmask16 __A) ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))) #endif -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sll_epi32(__m512i __A, __m128i __B) -{ +static __inline__ __m512i + __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_sll_epi32(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_sll_epi32(__A, __B), (__v16si)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_sll_epi32(__A, __B), (__v16si)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sll_epi64(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sll_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B); } -static __inline__ 
__m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_sll_epi64(__A, __B), (__v8di)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_sll_epi64(__A, __B), (__v8di)_mm512_setzero_si512()); @@ -5467,45 +5461,39 @@ _mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) (__v8di)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sra_epi32(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sra_epi32(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_sra_epi32(__A, __B), (__v16si)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_sra_epi32(__A, __B), (__v16si)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_sra_epi64(__m512i __A, __m128i __B) -{ +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sra_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_sra_epi64(__A, __B), (__v8di)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_sra_epi64(__A, __B), (__v8di)_mm512_setzero_si512()); @@ -5552,45 +5540,39 @@ _mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y) (__v8di)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_srl_epi32(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_srl_epi32(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_srl_epi32(__A, __B), (__v16si)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, (__v16si)_mm512_srl_epi32(__A, 
__B), (__v16si)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_srl_epi64(__m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_srl_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_srl_epi64(__A, __B), (__v8di)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, (__v8di)_mm512_srl_epi64(__A, __B), (__v8di)_mm512_setzero_si512()); diff --git a/clang/lib/Headers/avx512vbmiintrin.h b/clang/lib/Headers/avx512vbmiintrin.h index 84fda5c5849e8..5ac78f0849c26 100644 --- a/clang/lib/Headers/avx512vbmiintrin.h +++ b/clang/lib/Headers/avx512vbmiintrin.h @@ -15,61 +15,57 @@ #define __VBMIINTRIN_H /* Define the default attributes for the functions in this file. 
*/ +#if defined(__cplusplus) && (__cplusplus >= 201103L) #define __DEFAULT_FN_ATTRS \ __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"), \ - __min_vector_width__(512))) - -#if defined(__cplusplus) && (__cplusplus >= 201103L) -#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr + __min_vector_width__(512))) constexpr #else -#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"), \ + __min_vector_width__(512))) #endif -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_vpermi2varqi512((__v64qi)__A, (__v64qi)__I, (__v64qi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR -_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, - __m512i __B) { +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutex2var_epi8( + __m512i __A, __mmask64 __U, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512(__U, (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), (__v64qi)__A); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR -_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, - __m512i __B) { +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask2_permutex2var_epi8( + __m512i __A, __m512i __I, __mmask64 __U, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512(__U, (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), (__v64qi)__I); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR -_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, - __m512i __B) { +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutex2var_epi8( + __mmask64 __U, __m512i __A, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512(__U, (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), 
(__v64qi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_permutexvar_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_permvarqi512((__v64qi) __B, (__v64qi) __A); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_permutexvar_epi8(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, (__v64qi)_mm512_permutexvar_epi8(__A, __B), (__v64qi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR -_mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M, __m512i __A, - __m512i __B) { +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_permutexvar_epi8( + __m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, (__v64qi)_mm512_permutexvar_epi8(__A, __B), (__v64qi)__W); @@ -97,6 +93,6 @@ _mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y), (__v64qi)_mm512_setzero_si512()); } -#undef __DEFAULT_FN_ATTRS_CONSTEXPR + #undef __DEFAULT_FN_ATTRS #endif diff --git a/clang/lib/Headers/avx512vbmivlintrin.h b/clang/lib/Headers/avx512vbmivlintrin.h index 58a48dadff863..40a67bd63ca49 100644 --- a/clang/lib/Headers/avx512vbmivlintrin.h +++ b/clang/lib/Headers/avx512vbmivlintrin.h @@ -15,6 +15,16 @@ #define __VBMIVLINTRIN_H /* Define the default attributes for the functions in this file. 
*/ +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vbmi,avx512vl"), \ + __min_vector_width__(128))) constexpr +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vbmi,avx512vl"), \ + __min_vector_width__(256))) constexpr +#else #define __DEFAULT_FN_ATTRS128 \ __attribute__((__always_inline__, __nodebug__, \ __target__("avx512vbmi,avx512vl"), \ @@ -23,111 +33,96 @@ __attribute__((__always_inline__, __nodebug__, \ __target__("avx512vbmi,avx512vl"), \ __min_vector_width__(256))) - -#if defined(__cplusplus) && (__cplusplus >= 201103L) -#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr -#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr -#else -#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 -#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 #endif -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermi2varqi128((__v16qi)__A, (__v16qi)__I, (__v16qi)__B); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR -_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I, - __m128i __B) { +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi8( + __m128i __A, __mmask16 __U, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128(__U, (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), (__v16qi)__A); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR -_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U, - __m128i __B) { +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi8( + __m128i __A, __m128i __I, __mmask16 __U, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128(__U, 
(__v16qi)_mm_permutex2var_epi8(__A, __I, __B), (__v16qi)__I); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR -_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I, - __m128i __B) { +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi8( + __mmask16 __U, __m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128(__U, (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermi2varqi256((__v32qi)__A, (__v32qi)__I, (__v32qi)__B); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR -_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I, - __m256i __B) { +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi8( + __m256i __A, __mmask32 __U, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256(__U, (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), (__v32qi)__A); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR -_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U, - __m256i __B) { +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi8( + __m256i __A, __m256i __I, __mmask32 __U, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256(__U, (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), (__v32qi)__I); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR -_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I, - __m256i __B) { +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi8( + __mmask32 __U, __m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256(__U, (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i 
__DEFAULT_FN_ATTRS128_CONSTEXPR +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutexvar_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_permvarqi128((__v16qi)__B, (__v16qi)__A); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutexvar_epi8(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, (__v16qi)_mm_permutexvar_epi8(__A, __B), (__v16qi)_mm_setzero_si128()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR -_mm_mask_permutexvar_epi8(__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) { +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutexvar_epi8( + __m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, (__v16qi)_mm_permutexvar_epi8(__A, __B), (__v16qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutexvar_epi8(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_permvarqi256((__v32qi) __B, (__v32qi) __A); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi8(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, (__v32qi)_mm256_permutexvar_epi8(__A, __B), (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR -_mm256_mask_permutexvar_epi8(__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) { +static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi8( + __m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, (__v32qi)_mm256_permutexvar_epi8(__A, __B), (__v32qi)__W); @@ -179,9 +174,6 @@ _mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y) (__v32qi)_mm256_setzero_si256()); 
} -#undef __DEFAULT_FN_ATTRS128_CONSTEXPR -#undef __DEFAULT_FN_ATTRS256_CONSTEXPR #undef __DEFAULT_FN_ATTRS128 #undef __DEFAULT_FN_ATTRS256 - #endif diff --git a/clang/lib/Headers/avx512vlbf16intrin.h b/clang/lib/Headers/avx512vlbf16intrin.h index 2d7ea0114d6a5..8543402065d76 100644 --- a/clang/lib/Headers/avx512vlbf16intrin.h +++ b/clang/lib/Headers/avx512vlbf16intrin.h @@ -24,6 +24,14 @@ __target__("avx512vl,avx512bf16"), \ __min_vector_width__(256))) +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr +#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr +#else +#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 +#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 +#endif + /// Convert Two Packed Single Data to One Packed BF16 Data. /// /// \headerfile @@ -421,9 +429,10 @@ static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) { /// \param __A /// A 128-bit vector of [4 x bfloat]. /// \returns A 128-bit vector of [4 x float] come from conversion of __A -static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) { - return _mm_castsi128_ps( - (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16)); +static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtpbh_ps(__m128bh __A) { + return (__m128)_mm256_castps256_ps128( + (__m256) __builtin_convertvector(__A, __v8sf)); } /// Convert Packed BF16 Data to Packed float Data. @@ -433,9 +442,9 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) { /// \param __A /// A 128-bit vector of [8 x bfloat]. 
/// \returns A 256-bit vector of [8 x float] come from conversion of __A -static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) { - return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32( - (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16)); +static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtpbh_ps(__m128bh __A) { + return (__m256) __builtin_convertvector(__A, __v8sf); } /// Convert Packed BF16 Data to Packed float Data using zeroing mask. @@ -448,10 +457,10 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) { /// \param __A /// A 128-bit vector of [4 x bfloat]. /// \returns A 128-bit vector of [4 x float] come from conversion of __A -static __inline__ __m128 __DEFAULT_FN_ATTRS128 +static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { - return _mm_castsi128_ps((__m128i)_mm_slli_epi32( - (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16)); + return (__m128)__builtin_ia32_selectps_128( + (__mmask8)__U, (__v4sf)_mm_cvtpbh_ps(__A), (__v4sf)_mm_setzero_ps()); } /// Convert Packed BF16 Data to Packed float Data using zeroing mask. @@ -464,10 +473,11 @@ _mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { /// \param __A /// A 128-bit vector of [8 x bfloat]. /// \returns A 256-bit vector of [8 x float] come from conversion of __A -static __inline__ __m256 __DEFAULT_FN_ATTRS256 +static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { - return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32( - (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16)); + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtpbh_ps(__A), + (__v8sf)_mm256_setzero_ps()); } /// Convert Packed BF16 Data to Packed float Data using merging mask. @@ -483,11 +493,10 @@ _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { /// \param __A /// A 128-bit vector of [4 x bfloat]. 
/// \returns A 128-bit vector of [4 x float] come from conversion of __A -static __inline__ __m128 __DEFAULT_FN_ATTRS128 +static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) { - return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32( - (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A), - 16)); + return (__m128)__builtin_ia32_selectps_128( + (__mmask8)__U, (__v4sf)_mm_cvtpbh_ps(__A), (__v4sf)__S); } /// Convert Packed BF16 Data to Packed float Data using merging mask. @@ -503,15 +512,16 @@ _mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) { /// \param __A /// A 128-bit vector of [8 x bfloat]. /// \returns A 256-bit vector of [8 x float] come from conversion of __A -static __inline__ __m256 __DEFAULT_FN_ATTRS256 +static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) { - return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32( - (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), - 16)); + return (__m256)__builtin_ia32_selectps_256( + (__mmask8)__U, (__v8sf)_mm256_cvtpbh_ps(__A), (__v8sf)__S); } #undef __DEFAULT_FN_ATTRS128 #undef __DEFAULT_FN_ATTRS256 +#undef __DEFAULT_FN_ATTRS128_CONSTEXPR +#undef __DEFAULT_FN_ATTRS256_CONSTEXPR #endif #endif diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h index 885231b030b23..7a762e105e9af 100644 --- a/clang/lib/Headers/avx512vlfp16intrin.h +++ b/clang/lib/Headers/avx512vlfp16intrin.h @@ -623,7 +623,7 @@ _mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) { (__mmask16)(U))) static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_ph(__m128h __a) { - return __builtin_ia32_sqrtph((__v8hf)__a); + return __builtin_elementwise_sqrt(__a); } static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ph(__m128h __W, @@ -640,7 +640,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 
_mm_maskz_sqrt_ph(__mmask8 __U, } static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_sqrt_ph(__m256h __a) { - return (__m256h)__builtin_ia32_sqrtph256((__v16hf)__a); + return __builtin_elementwise_sqrt(__a); } static __inline__ __m256h __DEFAULT_FN_ATTRS256 diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h index 99c057030a4cc..388f99d812312 100644 --- a/clang/lib/Headers/avx512vlintrin.h +++ b/clang/lib/Headers/avx512vlintrin.h @@ -4299,33 +4299,29 @@ _mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) (__v4di)_mm256_ror_epi64((a), (b)), \ (__v4di)_mm256_setzero_si256())) -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_sll_epi32(__A, __B), (__v4si)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_sll_epi32(__A, __B), (__v4si)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_sll_epi32(__A, __B), (__v8si)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) { return 
(__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_sll_epi32(__A, __B), (__v8si)_mm256_setzero_si256()); @@ -4362,33 +4358,29 @@ _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) { (__v8si)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, (__v2di)_mm_sll_epi64(__A, __B), (__v2di)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, (__v2di)_mm_sll_epi64(__A, __B), (__v2di)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, (__v4di)_mm256_sll_epi64(__A, __B), (__v4di)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, (__v4di)_mm256_sll_epi64(__A, __B), (__v4di)_mm256_setzero_si256()); @@ -4641,33 +4633,29 @@ _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) (__v8si)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_srl_epi32(__A, __B), (__v4si)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_srl_epi32(__A, __B), (__v4si)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_srl_epi32(__A, __B), (__v8si)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_srl_epi32(__A, __B), (__v8si)_mm256_setzero_si256()); @@ -4704,33 +4692,29 @@ _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) { (__v8si)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, (__v2di)_mm_srl_epi64(__A, __B), (__v2di)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR 
+_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, (__v2di)_mm_srl_epi64(__A, __B), (__v2di)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, (__v4di)_mm256_srl_epi64(__A, __B), (__v4di)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, (__v4di)_mm256_srl_epi64(__A, __B), (__v4di)_mm256_setzero_si256()); @@ -6127,33 +6111,29 @@ _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) { (__v4di)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_sra_epi32(__A, __B), (__v4si)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, (__v4si)_mm_sra_epi32(__A, __B), (__v4si)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_sra_epi32(__m256i 
__W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_sra_epi32(__A, __B), (__v8si)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, (__v8si)_mm256_sra_epi32(__A, __B), (__v8si)_mm256_setzero_si256()); @@ -6188,45 +6168,39 @@ _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B) { (__v8si)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_sra_epi64(__m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_sra_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ (__v2di)_mm_sra_epi64(__A, __B), \ (__v2di)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ (__v2di)_mm_sra_epi64(__A, __B), \ (__v2di)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sra_epi64(__m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_sra_epi64(__m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 
-_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ (__v4di)_mm256_sra_epi64(__A, __B), \ (__v4di)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ (__v4di)_mm256_sra_epi64(__A, __B), \ (__v4di)_mm256_setzero_si256()); diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h index 44ef88db5cbce..54a6e0cd73ab9 100644 --- a/clang/lib/Headers/avxintrin.h +++ b/clang/lib/Headers/avxintrin.h @@ -333,10 +333,8 @@ static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a, /// A 256-bit vector of [4 x double]. /// \returns A 256-bit vector of [4 x double] containing the square roots of the /// values in the operand. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_sqrt_pd(__m256d __a) -{ - return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a); +static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a) { + return __builtin_elementwise_sqrt(__a); } /// Calculates the square roots of the values in a 256-bit vector of @@ -350,10 +348,8 @@ _mm256_sqrt_pd(__m256d __a) /// A 256-bit vector of [8 x float]. /// \returns A 256-bit vector of [8 x float] containing the square roots of the /// values in the operand. 
-static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_sqrt_ps(__m256 __a) -{ - return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a); +static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a) { + return __builtin_elementwise_sqrt(__a); } /// Calculates the reciprocal square roots of the values in a 256-bit diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h index dbe5ca0379cf5..9d71c69878e47 100644 --- a/clang/lib/Headers/emmintrin.h +++ b/clang/lib/Headers/emmintrin.h @@ -241,8 +241,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_pd(__m128d __a, /// bits are copied from the upper 64 bits of operand \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b); - return __extension__(__m128d){__c[0], __a[1]}; + return __extension__(__m128d){__builtin_elementwise_sqrt(__b[0]), __a[1]}; } /// Calculates the square root of the each of two values stored in a @@ -257,7 +256,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, /// \returns A 128-bit vector of [2 x double] containing the square roots of the /// values in the operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) { - return __builtin_ia32_sqrtpd((__v2df)__a); + return __builtin_elementwise_sqrt(__a); } /// Compares lower 64-bit double-precision values of both operands, and @@ -2783,8 +2782,8 @@ _mm_slli_epi16(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to left-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the left-shifted values. 
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_sll_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count); } @@ -2819,8 +2818,8 @@ _mm_slli_epi32(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to left-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the left-shifted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_sll_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count); } @@ -2855,8 +2854,8 @@ _mm_slli_epi64(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to left-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the left-shifted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_sll_epi64(__m128i __a, __m128i __count) { return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count); } @@ -2893,8 +2892,8 @@ _mm_srai_epi16(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to right-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the right-shifted values. 
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_sra_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count); } @@ -2931,8 +2930,8 @@ _mm_srai_epi32(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to right-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the right-shifted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_sra_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); } @@ -2992,8 +2991,8 @@ _mm_srli_epi16(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to right-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the right-shifted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_srl_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count); } @@ -3028,8 +3027,8 @@ _mm_srli_epi32(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to right-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the right-shifted values. 
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_srl_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count); } @@ -3064,8 +3063,8 @@ _mm_srli_epi64(__m128i __a, int __count) { /// A 128-bit integer vector in which bits [63:0] specify the number of bits /// to right-shift each value in operand \a __a. /// \returns A 128-bit integer vector containing the right-shifted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, - __m128i __count) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_srl_epi64(__m128i __a, __m128i __count) { return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count); } diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h index d1dc8275431c0..3550409b6988d 100644 --- a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h +++ b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h @@ -137,11 +137,7 @@ template constexpr vector lit_impl(T NDotL, T NDotH, T M) { } template constexpr T faceforward_impl(T N, T I, T Ng) { -#if (__has_builtin(__builtin_spirv_faceforward)) - return __builtin_spirv_faceforward(N, I, Ng); -#else return select(dot(I, Ng) < 0, N, -N); -#endif } template constexpr T ldexp_impl(T X, T Exp) { diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h index aca78e6986ad9..2cf46455d7915 100644 --- a/clang/lib/Headers/mmintrin.h +++ b/clang/lib/Headers/mmintrin.h @@ -39,14 +39,14 @@ typedef short __v8hi __attribute__((__vector_size__(16))); typedef char __v16qi __attribute__((__vector_size__(16))); /* Define the default attributes for the functions in this file. 
*/ +#if defined(__cplusplus) && (__cplusplus >= 201103L) #define __DEFAULT_FN_ATTRS_SSE2 \ __attribute__((__always_inline__, __nodebug__, __target__("sse2"), \ - __min_vector_width__(128))) - -#if defined(__cplusplus) && (__cplusplus >= 201103L) -#define __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR __DEFAULT_FN_ATTRS_SSE2 constexpr + __min_vector_width__(128))) constexpr #else -#define __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR __DEFAULT_FN_ATTRS_SSE2 +#define __DEFAULT_FN_ATTRS_SSE2 \ + __attribute__((__always_inline__, __nodebug__, __target__("sse2"), \ + __min_vector_width__(128))) #endif #define __trunc64(x) \ @@ -54,9 +54,6 @@ typedef char __v16qi __attribute__((__vector_size__(16))); #define __zext128(x) \ (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \ 1, 2, 3) -#define __anyext128(x) \ - (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \ - 1, -1, -1) /// Clears the MMX state by setting the state of the x87 stack registers /// to empty. @@ -82,10 +79,8 @@ static __inline__ void /// A 32-bit integer value. /// \returns A 64-bit integer vector. The lower 32 bits contain the value of the /// parameter. The upper 32 bits are set to 0. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cvtsi32_si64(int __i) -{ - return __extension__ (__m64)(__v2si){__i, 0}; +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtsi32_si64(int __i) { + return __extension__(__m64)(__v2si){__i, 0}; } /// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit @@ -99,10 +94,8 @@ _mm_cvtsi32_si64(int __i) /// A 64-bit integer vector. /// \returns A 32-bit signed integer value containing the lower 32 bits of the /// parameter. -static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cvtsi64_si32(__m64 __m) -{ - return ((__v2si)__m)[0]; +static __inline__ int __DEFAULT_FN_ATTRS_SSE2 _mm_cvtsi64_si32(__m64 __m) { + return ((__v2si)__m)[0]; } /// Casts a 64-bit signed integer value into a 64-bit integer vector. 
@@ -115,10 +108,8 @@ _mm_cvtsi64_si32(__m64 __m) /// A 64-bit signed integer. /// \returns A 64-bit integer vector containing the same bitwise pattern as the /// parameter. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cvtsi64_m64(long long __i) -{ - return __extension__ (__m64)(__v1di){__i}; +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtsi64_m64(long long __i) { + return __extension__(__m64)(__v1di){__i}; } /// Casts a 64-bit integer vector into a 64-bit signed integer value. @@ -131,10 +122,8 @@ _mm_cvtsi64_m64(long long __i) /// A 64-bit integer vector. /// \returns A 64-bit signed integer containing the same bitwise pattern as the /// parameter. -static __inline__ long long __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cvtm64_si64(__m64 __m) -{ - return ((__v1di)__m)[0]; +static __inline__ long long __DEFAULT_FN_ATTRS_SSE2 _mm_cvtm64_si64(__m64 __m) { + return ((__v1di)__m)[0]; } /// Converts, with saturation, 16-bit signed integers from both 64-bit integer @@ -156,8 +145,8 @@ _mm_cvtm64_si64(__m64 __m) /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_packs_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_packs_pi16(__m64 __m1, + __m64 __m2) { return __trunc64(__builtin_ia32_packsswb128( (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); } @@ -181,8 +170,8 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2) { /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the converted /// values. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_packs_pi32(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_packs_pi32(__m64 __m1, + __m64 __m2) { return __trunc64(__builtin_ia32_packssdw128( (__v4si)__builtin_shufflevector(__m1, __m2, 0, 1), (__v4si){})); } @@ -206,8 +195,8 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2) { /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_packs_pu16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_packs_pu16(__m64 __m1, + __m64 __m2) { return __trunc64(__builtin_ia32_packuswb128( (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); } @@ -233,8 +222,8 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2) { /// Bits [63:56] are written to bits [63:56] of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the interleaved /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpackhi_pi8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 12, 5, 13, 6, 14, 7, 15); } @@ -256,8 +245,8 @@ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { /// Bits [63:48] are written to bits [63:48] of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the interleaved /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpackhi_pi16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 6, 3, 7); } @@ -276,8 +265,8 @@ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { /// the upper 32 bits of the result. /// \returns A 64-bit integer vector of [2 x i32] containing the interleaved /// values. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpackhi_pi32(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 3); } @@ -302,8 +291,8 @@ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { /// Bits [31:24] are written to bits [63:56] of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the interleaved /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpacklo_pi8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8, 1, 9, 2, 10, 3, 11); } @@ -325,8 +314,8 @@ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { /// Bits [31:16] are written to bits [63:48] of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the interleaved /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpacklo_pi16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4, 1, 5); } @@ -345,8 +334,8 @@ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { /// the upper 32 bits of the result. /// \returns A 64-bit integer vector of [2 x i32] containing the interleaved /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_unpacklo_pi32(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2); } @@ -365,10 +354,9 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the sums of both /// parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_add_pi8(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_add_pi8(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2)); } /// Adds each 16-bit integer element of the first 64-bit integer vector @@ -386,10 +374,9 @@ _mm_add_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the sums of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_add_pi16(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_add_pi16(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2)); } /// Adds each 32-bit integer element of the first 64-bit integer vector @@ -407,10 +394,9 @@ _mm_add_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the sums of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_add_pi32(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v2su)__m1) + ((__v2su)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_add_pi32(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v2su)__m1) + ((__v2su)__m2)); } /// Adds, with saturation, each 8-bit signed integer element of the first @@ -431,8 +417,8 @@ _mm_add_pi32(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums /// of both parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_adds_pi8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_adds_pi8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_add_sat((__v8qs)__m1, (__v8qs)__m2); } @@ -454,8 +440,8 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums /// of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_adds_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_adds_pi16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_add_sat((__v4hi)__m1, (__v4hi)__m2); } @@ -476,8 +462,8 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the saturated /// unsigned sums of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_adds_pu8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_adds_pu8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_add_sat((__v8qu)__m1, (__v8qu)__m2); } @@ -498,8 +484,8 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the saturated /// unsigned sums of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_adds_pu16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_adds_pu16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_add_sat((__v4hu)__m1, (__v4hu)__m2); } @@ -518,10 +504,9 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [8 x i8] containing the subtrahends. /// \returns A 64-bit integer vector of [8 x i8] containing the differences of /// both parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_sub_pi8(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sub_pi8(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2)); } /// Subtracts each 16-bit integer element of the second 64-bit integer @@ -539,10 +524,9 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16] containing the subtrahends. /// \returns A 64-bit integer vector of [4 x i16] containing the differences of /// both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_sub_pi16(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sub_pi16(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2)); } /// Subtracts each 32-bit integer element of the second 64-bit integer @@ -560,10 +544,9 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32] containing the subtrahends. /// \returns A 64-bit integer vector of [2 x i32] containing the differences of /// both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_sub_pi32(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v2su)__m1) - ((__v2su)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sub_pi32(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v2su)__m1) - ((__v2su)__m2)); } /// Subtracts, with saturation, each 8-bit signed integer element of the second @@ -584,8 +567,8 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8] containing the subtrahends. /// \returns A 64-bit integer vector of [8 x i8] containing the saturated /// differences of both parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_subs_pi8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_subs_pi8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_sub_sat((__v8qs)__m1, (__v8qs)__m2); } @@ -607,8 +590,8 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16] containing the subtrahends. /// \returns A 64-bit integer vector of [4 x i16] containing the saturated /// differences of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_subs_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_subs_pi16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_sub_sat((__v4hi)__m1, (__v4hi)__m2); } @@ -630,8 +613,8 @@ _mm_subs_pi16(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [8 x i8] containing the subtrahends. /// \returns A 64-bit integer vector of [8 x i8] containing the saturated /// differences of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_subs_pu8(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_subs_pu8(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_sub_sat((__v8qu)__m1, (__v8qu)__m2); } @@ -653,8 +636,8 @@ _mm_subs_pu8(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16] containing the subtrahends. /// \returns A 64-bit integer vector of [4 x i16] containing the saturated /// differences of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_subs_pu16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_subs_pu16(__m64 __m1, + __m64 __m2) { return (__m64)__builtin_elementwise_sub_sat((__v4hu)__m1, (__v4hu)__m2); } @@ -679,8 +662,8 @@ _mm_subs_pu16(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [2 x i32] containing the sums of /// products of both parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_madd_pi16(__m64 __m1, __m64 __m2) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_madd_pi16(__m64 __m1, + __m64 __m2) { return __trunc64(__builtin_ia32_pmaddwd128((__v8hi)__zext128(__m1), (__v8hi)__zext128(__m2))); } @@ -700,11 +683,10 @@ _mm_madd_pi16(__m64 __m1, __m64 __m2) { /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits /// of the products of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_mulhi_pi16(__m64 __m1, __m64 __m2) -{ - return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__zext128(__m1), - (__v8hi)__zext128(__m2))); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_mulhi_pi16(__m64 __m1, + __m64 __m2) { + return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__zext128(__m1), + (__v8hi)__zext128(__m2))); } /// Multiplies each 16-bit signed integer element of the first 64-bit @@ -722,10 +704,9 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits /// of the products of both parameters. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_mullo_pi16(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_mullo_pi16(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2)); } /// Left-shifts each 16-bit signed integer element of the first @@ -748,8 +729,8 @@ _mm_mullo_pi16(__m64 __m1, __m64 __m2) static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sll_pi16(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psllw128((__v8hi)__anyext128(__m), - (__v8hi)__anyext128(__count))); + return __trunc64(__builtin_ia32_psllw128((__v8hi)__zext128(__m), + (__v8hi)__zext128(__count))); } /// Left-shifts each 16-bit signed integer element of a 64-bit integer @@ -768,8 +749,8 @@ _mm_sll_pi16(__m64 __m, __m64 __count) /// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted /// values. If \a __count is greater or equal to 16, the result is set to all /// 0. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_slli_pi16(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_slli_pi16(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psllwi128((__v8hi)__zext128(__m), __count)); } @@ -793,8 +774,8 @@ _mm_slli_pi16(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sll_pi32(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_pslld128((__v4si)__anyext128(__m), - (__v4si)__anyext128(__count))); + return __trunc64(__builtin_ia32_pslld128((__v4si)__zext128(__m), + (__v4si)__zext128(__count))); } /// Left-shifts each 32-bit signed integer element of a 64-bit integer @@ -813,8 +794,8 @@ _mm_sll_pi32(__m64 __m, __m64 __count) /// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted /// values. If \a __count is greater or equal to 32, the result is set to all /// 0. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_slli_pi32(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_slli_pi32(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_pslldi128((__v4si)__zext128(__m), __count)); } @@ -835,8 +816,8 @@ _mm_slli_pi32(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sll_si64(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psllq128((__v2di)__anyext128(__m), - (__v2di)__anyext128(__count))); + return __trunc64(__builtin_ia32_psllq128((__v2di)__zext128(__m), + (__v2di)__zext128(__count))); } /// Left-shifts the first parameter, which is a 64-bit integer, by the @@ -853,8 +834,8 @@ _mm_sll_si64(__m64 __m, __m64 __count) /// A 32-bit integer value. /// \returns A 64-bit integer vector containing the left-shifted value. If /// \a __count is greater or equal to 64, the result is set to 0. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_slli_si64(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_slli_si64(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psllqi128((__v2di)__zext128(__m), __count)); } @@ -879,8 +860,8 @@ _mm_slli_si64(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sra_pi16(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psraw128((__v8hi)__anyext128(__m), - (__v8hi)__anyext128(__count))); + return __trunc64(__builtin_ia32_psraw128((__v8hi)__zext128(__m), + (__v8hi)__zext128(__count))); } /// Right-shifts each 16-bit integer element of a 64-bit integer vector @@ -900,8 +881,8 @@ _mm_sra_pi16(__m64 __m, __m64 __count) /// A 32-bit integer value. /// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted /// values. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_srai_pi16(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srai_pi16(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psrawi128((__v8hi)__zext128(__m), __count)); } @@ -926,8 +907,8 @@ _mm_srai_pi16(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sra_pi32(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psrad128((__v4si)__anyext128(__m), - (__v4si)__anyext128(__count))); + return __trunc64(__builtin_ia32_psrad128((__v4si)__zext128(__m), + (__v4si)__zext128(__count))); } /// Right-shifts each 32-bit integer element of a 64-bit integer vector @@ -947,8 +928,8 @@ _mm_sra_pi32(__m64 __m, __m64 __count) /// A 32-bit integer value. /// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_srai_pi32(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srai_pi32(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psradi128((__v4si)__zext128(__m), __count)); } @@ -972,8 +953,8 @@ _mm_srai_pi32(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srl_pi16(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psrlw128((__v8hi)__anyext128(__m), - (__v8hi)__anyext128(__count))); + return __trunc64(__builtin_ia32_psrlw128((__v8hi)__zext128(__m), + (__v8hi)__zext128(__count))); } /// Right-shifts each 16-bit integer element of a 64-bit integer vector @@ -992,8 +973,8 @@ _mm_srl_pi16(__m64 __m, __m64 __count) /// A 32-bit integer value. /// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted /// values. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_srli_pi16(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srli_pi16(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psrlwi128((__v8hi)__zext128(__m), __count)); } @@ -1017,8 +998,8 @@ _mm_srli_pi16(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srl_pi32(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psrld128((__v4si)__anyext128(__m), - (__v4si)__anyext128(__count))); + return __trunc64(__builtin_ia32_psrld128((__v4si)__zext128(__m), + (__v4si)__zext128(__count))); } /// Right-shifts each 32-bit integer element of a 64-bit integer vector @@ -1037,8 +1018,8 @@ _mm_srl_pi32(__m64 __m, __m64 __count) /// A 32-bit integer value. /// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_srli_pi32(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srli_pi32(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psrldi128((__v4si)__zext128(__m), __count)); } @@ -1059,8 +1040,8 @@ _mm_srli_pi32(__m64 __m, int __count) { static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srl_si64(__m64 __m, __m64 __count) { - return __trunc64(__builtin_ia32_psrlq128((__v2di)__anyext128(__m), - (__v2di)__anyext128(__count))); + return __trunc64(__builtin_ia32_psrlq128((__v2di)__zext128(__m), + (__v2di)__zext128(__count))); } /// Right-shifts the first parameter, which is a 64-bit integer, by the @@ -1078,8 +1059,8 @@ _mm_srl_si64(__m64 __m, __m64 __count) /// \param __count /// A 32-bit integer value. /// \returns A 64-bit integer vector containing the right-shifted value. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_srli_si64(__m64 __m, int __count) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_srli_si64(__m64 __m, + int __count) { return __trunc64(__builtin_ia32_psrlqi128((__v2di)__zext128(__m), __count)); } @@ -1095,10 +1076,9 @@ _mm_srli_si64(__m64 __m, int __count) { /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise AND of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_and_si64(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v1du)__m1) & ((__v1du)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_and_si64(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v1du)__m1) & ((__v1du)__m2)); } /// Performs a bitwise NOT of the first 64-bit integer vector, and then @@ -1116,10 +1096,9 @@ _mm_and_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise AND of the second /// parameter and the one's complement of the first parameter. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_andnot_si64(__m64 __m1, __m64 __m2) -{ - return (__m64)(~((__v1du)__m1) & ((__v1du)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_andnot_si64(__m64 __m1, + __m64 __m2) { + return (__m64)(~((__v1du)__m1) & ((__v1du)__m2)); } /// Performs a bitwise OR of two 64-bit integer vectors. @@ -1134,10 +1113,9 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise OR of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_or_si64(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v1du)__m1) | ((__v1du)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_or_si64(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v1du)__m1) | ((__v1du)__m2)); } /// Performs a bitwise exclusive OR of two 64-bit integer vectors. 
@@ -1152,10 +1130,9 @@ _mm_or_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise exclusive OR of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_xor_si64(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_xor_si64(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2)); } /// Compares the 8-bit integer elements of two 64-bit integer vectors of @@ -1174,10 +1151,9 @@ _mm_xor_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpeq_pi8(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2)); } /// Compares the 16-bit integer elements of two 64-bit integer vectors of @@ -1196,10 +1172,9 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpeq_pi16(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2)); } /// Compares the 32-bit integer elements of two 64-bit integer vectors of @@ -1218,10 +1193,9 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the comparison /// results. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) -{ - return (__m64)(((__v2si)__m1) == ((__v2si)__m2)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpeq_pi32(__m64 __m1, + __m64 __m2) { + return (__m64)(((__v2si)__m1) == ((__v2si)__m2)); } /// Compares the 8-bit integer elements of two 64-bit integer vectors of @@ -1240,9 +1214,8 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) -{ +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpgt_pi8(__m64 __m1, + __m64 __m2) { /* This function always performs a signed comparison, but __v8qi is a char which may be signed or unsigned, so use __v8qs. */ return (__m64)((__v8qs)__m1 > (__v8qs)__m2); @@ -1264,10 +1237,9 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) -{ - return (__m64)((__v4hi)__m1 > (__v4hi)__m2); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpgt_pi16(__m64 __m1, + __m64 __m2) { + return (__m64)((__v4hi)__m1 > (__v4hi)__m2); } /// Compares the 32-bit integer elements of two 64-bit integer vectors of @@ -1286,10 +1258,9 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the comparison /// results. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) -{ - return (__m64)((__v2si)__m1 > (__v2si)__m2); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cmpgt_pi32(__m64 __m1, + __m64 __m2) { + return (__m64)((__v2si)__m1 > (__v2si)__m2); } /// Constructs a 64-bit integer vector initialized to zero. @@ -1299,8 +1270,7 @@ _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) /// This intrinsic corresponds to the PXOR instruction. /// /// \returns An initialized 64-bit integer vector with all elements set to zero. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_setzero_si64(void) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_setzero_si64(void) { return __extension__(__m64){0LL}; } @@ -1319,8 +1289,8 @@ _mm_setzero_si64(void) { /// A 32-bit integer value used to initialize the lower 32 bits of the /// result. /// \returns An initialized 64-bit integer vector. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_set_pi32(int __i1, int __i0) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set_pi32(int __i1, + int __i0) { return __extension__(__m64)(__v2si){__i0, __i1}; } @@ -1341,8 +1311,10 @@ _mm_set_pi32(int __i1, int __i0) { /// \param __s0 /// A 16-bit integer value used to initialize bits [15:0] of the result. /// \returns An initialized 64-bit integer vector. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_set_pi16(short __s3, short __s2, short __s1, short __s0) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set_pi16(short __s3, + short __s2, + short __s1, + short __s0) { return __extension__(__m64)(__v4hi){__s0, __s1, __s2, __s3}; } @@ -1371,7 +1343,7 @@ _mm_set_pi16(short __s3, short __s2, short __s1, short __s0) { /// \param __b0 /// An 8-bit integer value used to initialize bits [7:0] of the result. /// \returns An initialized 64-bit integer vector. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { return __extension__(__m64)(__v8qi){__b0, __b1, __b2, __b3, @@ -1391,8 +1363,7 @@ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, /// A 32-bit integer value used to initialize each vector element of the /// result. /// \returns An initialized 64-bit integer vector of [2 x i32]. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_set1_pi32(int __i) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set1_pi32(int __i) { return _mm_set_pi32(__i, __i); } @@ -1409,8 +1380,7 @@ _mm_set1_pi32(int __i) { /// A 16-bit integer value used to initialize each vector element of the /// result. /// \returns An initialized 64-bit integer vector of [4 x i16]. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_set1_pi16(short __w) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set1_pi16(short __w) { return _mm_set_pi16(__w, __w, __w, __w); } @@ -1426,8 +1396,7 @@ _mm_set1_pi16(short __w) { /// An 8-bit integer value used to initialize each vector element of the /// result. /// \returns An initialized 64-bit integer vector of [8 x i8]. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_set1_pi8(char __b) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_set1_pi8(char __b) { return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); } @@ -1446,8 +1415,8 @@ _mm_set1_pi8(char __b) { /// A 32-bit integer value used to initialize the upper 32 bits of the /// result. /// \returns An initialized 64-bit integer vector. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_setr_pi32(int __i0, int __i1) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_setr_pi32(int __i0, + int __i1) { return _mm_set_pi32(__i1, __i0); } @@ -1468,8 +1437,10 @@ _mm_setr_pi32(int __i0, int __i1) { /// \param __w3 /// A 16-bit integer value used to initialize bits [63:48] of the result. /// \returns An initialized 64-bit integer vector. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR -_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) { +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_setr_pi16(short __w0, + short __w1, + short __w2, + short __w3) { return _mm_set_pi16(__w3, __w2, __w1, __w0); } @@ -1498,13 +1469,12 @@ _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) { /// \param __b7 /// An 8-bit integer value used to initialize bits [63:56] of the result. /// \returns An initialized 64-bit integer vector. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7) { return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); } -#undef __anyext128 #undef __trunc64 #undef __DEFAULT_FN_ATTRS_SSE2 diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h index fe6afdcfc3fdb..72a643948bed6 100644 --- a/clang/lib/Headers/xmmintrin.h +++ b/clang/lib/Headers/xmmintrin.h @@ -231,10 +231,9 @@ _mm_div_ps(__m128 __a, __m128 __b) { /// used in the calculation. /// \returns A 128-bit vector of [4 x float] containing the square root of the /// value in the low-order bits of the operand. 
-static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_sqrt_ss(__m128 __a) -{ - return (__m128)__builtin_ia32_sqrtss((__v4sf)__a); +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ss(__m128 __a) { + __a[0] = __builtin_elementwise_sqrt(__a[0]); + return __a; } /// Calculates the square roots of the values stored in a 128-bit vector @@ -248,10 +247,8 @@ _mm_sqrt_ss(__m128 __a) /// A 128-bit vector of [4 x float]. /// \returns A 128-bit vector of [4 x float] containing the square roots of the /// values in the operand. -static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_sqrt_ps(__m128 __a) -{ - return __builtin_ia32_sqrtps((__v4sf)__a); +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a) { + return __builtin_elementwise_sqrt(__a); } /// Calculates the approximate reciprocal of the value stored in the diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt index 37faa0302caaa..9a597146b2fc4 100644 --- a/clang/lib/Interpreter/CMakeLists.txt +++ b/clang/lib/Interpreter/CMakeLists.txt @@ -46,6 +46,7 @@ add_clang_library(clangInterpreter clangFrontend clangFrontendTool clangLex + clangOptions clangParse clangSema clangSerialization diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index 7764fa7dc92b9..6cbc5e9910bcc 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ b/clang/lib/Interpreter/Interpreter.cpp @@ -42,6 +42,7 @@ #include "clang/Interpreter/Interpreter.h" #include "clang/Interpreter/Value.h" #include "clang/Lex/PreprocessorOptions.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "clang/Sema/Lookup.h" #include "clang/Serialization/ObjectFilePCHContainerReader.h" @@ -105,7 +106,7 @@ CreateCI(const llvm::opt::ArgStringList &Argv) { if (Clang->getHeaderSearchOpts().UseBuiltinIncludes && Clang->getHeaderSearchOpts().ResourceDir.empty()) Clang->getHeaderSearchOpts().ResourceDir = - CompilerInvocation::GetResourcesPath(Argv[0], nullptr); + 
GetResourcesPath(Argv[0], nullptr); Clang->createVirtualFileSystem(); diff --git a/clang/lib/Options/OptionUtils.cpp b/clang/lib/Options/OptionUtils.cpp index fcafd3c83c6b3..e5aefa012f679 100644 --- a/clang/lib/Options/OptionUtils.cpp +++ b/clang/lib/Options/OptionUtils.cpp @@ -9,7 +9,12 @@ #include "clang/Options/OptionUtils.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/DiagnosticDriver.h" +#include "clang/Basic/Version.h" +#include "clang/Config/config.h" +#include "clang/Options/Options.h" #include "llvm/Option/ArgList.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/Path.h" using namespace clang; using namespace llvm::opt; @@ -31,17 +36,211 @@ IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id, } } // namespace -namespace clang { - -int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default, - DiagnosticsEngine *Diags, unsigned Base) { +int clang::getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default, + DiagnosticsEngine *Diags, unsigned Base) { return getLastArgIntValueImpl(Args, Id, Default, Diags, Base); } -uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id, - uint64_t Default, DiagnosticsEngine *Diags, - unsigned Base) { +uint64_t clang::getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id, + uint64_t Default, + DiagnosticsEngine *Diags, unsigned Base) { return getLastArgIntValueImpl(Args, Id, Default, Diags, Base); } -} // namespace clang +StringRef clang::parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args) { + const Arg *A = Args.getLastArg(options::OPT_mprefer_vector_width_EQ); + if (!A) + return ""; + + StringRef Value = A->getValue(); + unsigned Width LLVM_ATTRIBUTE_UNINITIALIZED; + + // Only "none" and Integer values are accepted by + // -mprefer-vector-width=. 
+ if (Value != "none" && Value.getAsInteger(10, Width)) { + Diags.Report(clang::diag::err_drv_invalid_value) + << A->getOption().getName() << Value; + return ""; + } + + return Value; +} + +// This is a helper function for validating the optional refinement step +// parameter in reciprocal argument strings. Return false if there is an error +// parsing the refinement step. Otherwise, return true and set the Position +// of the refinement step in the input string. +static bool getRefinementStep(StringRef In, clang::DiagnosticsEngine &Diags, + const Arg &A, size_t &Position) { + const char RefinementStepToken = ':'; + Position = In.find(RefinementStepToken); + if (Position != StringRef::npos) { + StringRef Option = A.getOption().getName(); + StringRef RefStep = In.substr(Position + 1); + // Allow exactly one numeric character for the additional refinement + // step parameter. This is reasonable for all currently-supported + // operations and architectures because we would expect that a larger value + // of refinement steps would cause the estimate "optimization" to + // under-perform the native operation. Also, if the estimate does not + // converge quickly, it probably will not ever converge, so further + // refinement steps will not produce a better answer. 
+ if (RefStep.size() != 1) { + Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; + return false; + } + char RefStepChar = RefStep[0]; + if (RefStepChar < '0' || RefStepChar > '9') { + Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; + return false; + } + } + return true; +} + +StringRef clang::parseMRecipOption(clang::DiagnosticsEngine &Diags, + const ArgList &Args) { + StringRef DisabledPrefixIn = "!"; + StringRef DisabledPrefixOut = "!"; + StringRef EnabledPrefixOut = ""; + StringRef Out = ""; + + const Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ); + if (!A) + return ""; + + const unsigned NumOptions = A->getNumValues(); + if (NumOptions == 0) { + // No option is the same as "all". + return "all"; + } + + // Pass through "all", "none", or "default" with an optional refinement step. + if (NumOptions == 1) { + StringRef Val = A->getValue(0); + size_t RefStepLoc; + if (!getRefinementStep(Val, Diags, *A, RefStepLoc)) + return ""; + StringRef ValBase = Val.slice(0, RefStepLoc); + if (ValBase == "all" || ValBase == "none" || ValBase == "default") { + return Val; + } + } + + // Each reciprocal type may be enabled or disabled individually. + // Check each input value for validity, concatenate them all back together, + // and pass through. 
+ + llvm::StringMap OptionStrings; + OptionStrings.insert(std::make_pair("divd", false)); + OptionStrings.insert(std::make_pair("divf", false)); + OptionStrings.insert(std::make_pair("divh", false)); + OptionStrings.insert(std::make_pair("vec-divd", false)); + OptionStrings.insert(std::make_pair("vec-divf", false)); + OptionStrings.insert(std::make_pair("vec-divh", false)); + OptionStrings.insert(std::make_pair("sqrtd", false)); + OptionStrings.insert(std::make_pair("sqrtf", false)); + OptionStrings.insert(std::make_pair("sqrth", false)); + OptionStrings.insert(std::make_pair("vec-sqrtd", false)); + OptionStrings.insert(std::make_pair("vec-sqrtf", false)); + OptionStrings.insert(std::make_pair("vec-sqrth", false)); + + for (unsigned i = 0; i != NumOptions; ++i) { + StringRef Val = A->getValue(i); + + bool IsDisabled = Val.starts_with(DisabledPrefixIn); + // Ignore the disablement token for string matching. + if (IsDisabled) + Val = Val.substr(1); + + size_t RefStep; + if (!getRefinementStep(Val, Diags, *A, RefStep)) + return ""; + + StringRef ValBase = Val.slice(0, RefStep); + llvm::StringMap::iterator OptionIter = OptionStrings.find(ValBase); + if (OptionIter == OptionStrings.end()) { + // Try again specifying float suffix. + OptionIter = OptionStrings.find(ValBase.str() + 'f'); + if (OptionIter == OptionStrings.end()) { + // The input name did not match any known option string. + Diags.Report(diag::err_drv_unknown_argument) << Val; + return ""; + } + // The option was specified without a half or float or double suffix. + // Make sure that the double or half entry was not already specified. + // The float entry will be checked below. + if (OptionStrings[ValBase.str() + 'd'] || + OptionStrings[ValBase.str() + 'h']) { + Diags.Report(diag::err_drv_invalid_value) + << A->getOption().getName() << Val; + return ""; + } + } + + if (OptionIter->second == true) { + // Duplicate option specified. 
+ Diags.Report(diag::err_drv_invalid_value) + << A->getOption().getName() << Val; + return ""; + } + + // Mark the matched option as found. Do not allow duplicate specifiers. + OptionIter->second = true; + + // If the precision was not specified, also mark the double and half entry + // as found. + if (ValBase.back() != 'f' && ValBase.back() != 'd' && + ValBase.back() != 'h') { + OptionStrings[ValBase.str() + 'd'] = true; + OptionStrings[ValBase.str() + 'h'] = true; + } + + // Build the output string. + StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut; + Out = Args.MakeArgString(Out + Prefix + Val); + if (i != NumOptions - 1) + Out = Args.MakeArgString(Out + ","); + } + + return Out; +} + +std::string clang::GetResourcesPath(StringRef BinaryPath) { + // Since the resource directory is embedded in the module hash, it's important + // that all places that need it call this function, so that they get the + // exact same string ("a/../b/" and "b/" get different hashes, for example). + + // Dir is bin/ or lib/, depending on where BinaryPath is. + StringRef Dir = llvm::sys::path::parent_path(BinaryPath); + SmallString<128> P(Dir); + + StringRef ConfiguredResourceDir(CLANG_RESOURCE_DIR); + if (!ConfiguredResourceDir.empty()) { + // FIXME: We should fix the behavior of llvm::sys::path::append so we don't + // need to check for absolute paths here. + if (llvm::sys::path::is_absolute(ConfiguredResourceDir)) + P = ConfiguredResourceDir; + else + llvm::sys::path::append(P, ConfiguredResourceDir); + } else { + // On Windows, libclang.dll is in bin/. + // On non-Windows, libclang.so/.dylib is in lib/. + // With a static-library build of libclang, LibClangPath will contain the + // path of the embedding binary, which for LLVM binaries will be in bin/. + // ../lib gets us to lib/ in both cases. 
+ P = llvm::sys::path::parent_path(Dir); + // This search path is also created in the COFF driver of lld, so any + // changes here also needs to happen in lld/COFF/Driver.cpp + llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang", + CLANG_VERSION_MAJOR_STRING); + } + + return std::string(P); +} + +std::string clang::GetResourcesPath(const char *Argv0, void *MainAddr) { + const std::string ClangExecutable = + llvm::sys::fs::getMainExecutable(Argv0, MainAddr); + return GetResourcesPath(ClangExecutable); +} diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp index f9665b5e59831..c91ca751984c9 100644 --- a/clang/lib/Sema/CheckExprLifetime.cpp +++ b/clang/lib/Sema/CheckExprLifetime.cpp @@ -17,6 +17,9 @@ #include "llvm/ADT/PointerIntPair.h" namespace clang::sema { +using lifetimes::isGslOwnerType; +using lifetimes::isGslPointerType; + namespace { enum LifetimeKind { /// The lifetime of a temporary bound to this entity ends at the end of the @@ -257,38 +260,8 @@ static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path, Expr *Init, ReferenceKind RK, LocalVisitor Visit); -template static bool isRecordWithAttr(QualType Type) { - auto *RD = Type->getAsCXXRecordDecl(); - if (!RD) - return false; - // Generally, if a primary template class declaration is annotated with an - // attribute, all its specializations generated from template instantiations - // should inherit the attribute. - // - // However, since lifetime analysis occurs during parsing, we may encounter - // cases where a full definition of the specialization is not required. In - // such cases, the specialization declaration remains incomplete and lacks the - // attribute. Therefore, we fall back to checking the primary template class. - // - // Note: it is possible for a specialization declaration to have an attribute - // even if the primary template does not. 
- // - // FIXME: What if the primary template and explicit specialization - // declarations have conflicting attributes? We should consider diagnosing - // this scenario. - bool Result = RD->hasAttr(); - - if (auto *CTSD = dyn_cast(RD)) - Result |= CTSD->getSpecializedTemplate()->getTemplatedDecl()->hasAttr(); - - return Result; -} - -// Tells whether the type is annotated with [[gsl::Pointer]]. -bool isGLSPointerType(QualType QT) { return isRecordWithAttr(QT); } - static bool isPointerLikeType(QualType QT) { - return isGLSPointerType(QT) || QT->isPointerType() || QT->isNullPtrType(); + return isGslPointerType(QT) || QT->isPointerType() || QT->isNullPtrType(); } // Decl::isInStdNamespace will return false for iterators in some STL @@ -331,7 +304,7 @@ static bool isContainerOfOwner(const RecordDecl *Container) { return false; const auto &TAs = CTSD->getTemplateArgs(); return TAs.size() > 0 && TAs[0].getKind() == TemplateArgument::Type && - isRecordWithAttr(TAs[0].getAsType()); + isGslOwnerType(TAs[0].getAsType()); } // Returns true if the given Record is `std::initializer_list`. 
@@ -349,14 +322,13 @@ static bool isStdInitializerListOfPointer(const RecordDecl *RD) { static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) { if (auto *Conv = dyn_cast_or_null(Callee)) - if (isRecordWithAttr(Conv->getConversionType()) && + if (isGslPointerType(Conv->getConversionType()) && Callee->getParent()->hasAttr()) return true; if (!isInStlNamespace(Callee->getParent())) return false; - if (!isRecordWithAttr( - Callee->getFunctionObjectParameterType()) && - !isRecordWithAttr(Callee->getFunctionObjectParameterType())) + if (!isGslPointerType(Callee->getFunctionObjectParameterType()) && + !isGslOwnerType(Callee->getFunctionObjectParameterType())) return false; if (isPointerLikeType(Callee->getReturnType())) { if (!Callee->getIdentifier()) @@ -393,7 +365,7 @@ static bool shouldTrackFirstArgument(const FunctionDecl *FD) { if (!RD->hasAttr() && !RD->hasAttr()) return false; if (FD->getReturnType()->isPointerType() || - isRecordWithAttr(FD->getReturnType())) { + isGslPointerType(FD->getReturnType())) { return llvm::StringSwitch(FD->getName()) .Cases({"begin", "rbegin", "cbegin", "crbegin"}, true) .Cases({"end", "rend", "cend", "crend"}, true) @@ -465,7 +437,7 @@ shouldTrackFirstArgumentForConstructor(const CXXConstructExpr *Ctor) { return true; // RHS must be an owner. - if (!isRecordWithAttr(RHSArgType)) + if (!isGslOwnerType(RHSArgType)) return false; // Bail out if the RHS is Owner. @@ -547,7 +519,7 @@ static void visitFunctionCallArguments(IndirectLocalPath &Path, Expr *Call, // Once we initialized a value with a non gsl-owner reference, it can no // longer dangle. 
if (ReturnType->isReferenceType() && - !isRecordWithAttr(ReturnType->getPointeeType())) { + !isGslOwnerType(ReturnType->getPointeeType())) { for (const IndirectLocalPathEntry &PE : llvm::reverse(Path)) { if (PE.Kind == IndirectLocalPathEntry::GslReferenceInit || PE.Kind == IndirectLocalPathEntry::LifetimeBoundCall) @@ -1158,8 +1130,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // auto p2 = Temp().owner; // Here p2 is dangling. if (const auto *FD = llvm::dyn_cast_or_null(E.D); FD && !FD->getType()->isReferenceType() && - isRecordWithAttr(FD->getType()) && - LK != LK_MemInitializer) { + isGslOwnerType(FD->getType()) && LK != LK_MemInitializer) { return Report; } return Abandon; @@ -1191,10 +1162,9 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // const GSLOwner& func(const Foo& foo [[clang::lifetimebound]]) // GSLOwner* func(cosnt Foo& foo [[clang::lifetimebound]]) // GSLPointer func(const Foo& foo [[clang::lifetimebound]]) - if (FD && - ((FD->getReturnType()->isPointerOrReferenceType() && - isRecordWithAttr(FD->getReturnType()->getPointeeType())) || - isGLSPointerType(FD->getReturnType()))) + if (FD && ((FD->getReturnType()->isPointerOrReferenceType() && + isGslOwnerType(FD->getReturnType()->getPointeeType())) || + isGslPointerType(FD->getReturnType()))) return Report; return Abandon; @@ -1206,7 +1176,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // int &p = *localUniquePtr; // someContainer.add(std::move(localUniquePtr)); // return p; - if (!pathContainsInit(Path) && isRecordWithAttr(L->getType())) + if (!pathContainsInit(Path) && isGslOwnerType(L->getType())) return Report; return Abandon; } @@ -1215,8 +1185,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, auto *MTE = dyn_cast(L); bool IsGslPtrValueFromGslTempOwner = - MTE && !MTE->getExtendingDecl() && - isRecordWithAttr(MTE->getType()); + MTE && !MTE->getExtendingDecl() && 
isGslOwnerType(MTE->getType()); // Skipping a chain of initializing gsl::Pointer annotated objects. // We are looking only for the final source to find out if it was // a local or temporary owner or the address of a local @@ -1231,7 +1200,7 @@ static bool shouldRunGSLAssignmentAnalysis(const Sema &SemaRef, bool EnableGSLAssignmentWarnings = !SemaRef.getDiagnostics().isIgnored( diag::warn_dangling_lifetime_pointer_assignment, SourceLocation()); return (EnableGSLAssignmentWarnings && - (isRecordWithAttr(Entity.LHS->getType()) || + (isGslPointerType(Entity.LHS->getType()) || lifetimes::isAssignmentOperatorLifetimeBound( Entity.AssignmentOperator))); } @@ -1400,7 +1369,7 @@ checkExprLifetimeImpl(Sema &SemaRef, const InitializedEntity *InitEntity, // Suppress false positives for code like the one below: // Ctor(unique_ptr up) : pointer(up.get()), owner(move(up)) {} // FIXME: move this logic to analyzePathForGSLPointer. - if (DRE && isRecordWithAttr(DRE->getType())) + if (DRE && isGslOwnerType(DRE->getType())) return false; auto *VD = DRE ? dyn_cast(DRE->getDecl()) : nullptr; diff --git a/clang/lib/Sema/CheckExprLifetime.h b/clang/lib/Sema/CheckExprLifetime.h index 16595d0ca1b36..38b7061988dc7 100644 --- a/clang/lib/Sema/CheckExprLifetime.h +++ b/clang/lib/Sema/CheckExprLifetime.h @@ -18,9 +18,6 @@ namespace clang::sema { -// Tells whether the type is annotated with [[gsl::Pointer]]. -bool isGLSPointerType(QualType QT); - /// Describes an entity that is being assigned. struct AssignedEntity { // The left-hand side expression of the assignment. 
diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp index 8411a3da8322d..7729c113e422e 100644 --- a/clang/lib/Sema/SemaAttr.cpp +++ b/clang/lib/Sema/SemaAttr.cpp @@ -11,11 +11,11 @@ // //===----------------------------------------------------------------------===// -#include "CheckExprLifetime.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/Expr.h" +#include "clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" @@ -289,7 +289,7 @@ void Sema::inferLifetimeCaptureByAttribute(FunctionDecl *FD) { // We only apply the lifetime_capture_by attribute to parameters of // pointer-like reference types (`const T&`, `T&&`). if (PVD->getType()->isReferenceType() && - sema::isGLSPointerType(PVD->getType().getNonReferenceType())) { + lifetimes::isGslPointerType(PVD->getType().getNonReferenceType())) { int CaptureByThis[] = {LifetimeCaptureByAttr::This}; PVD->addAttr( LifetimeCaptureByAttr::CreateImplicit(Context, CaptureByThis, 1)); diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index f4e58de91286b..0ffb4854ba86d 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -4482,6 +4482,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, case AtomicExpr::AO__scoped_atomic_or_fetch: case AtomicExpr::AO__scoped_atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + case AtomicExpr::AO__scoped_atomic_udec_wrap: Form = Arithmetic; break; diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index c808dec12a6cf..cfabd1b76c103 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -6736,14 +6736,13 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc, 
checkDirectCallValidity(*this, Fn, FD, ArgExprs); - // If this expression is a call to a builtin function in HIP device - // compilation, allow a pointer-type argument to default address space to be - // passed as a pointer-type parameter to a non-default address space. - // If Arg is declared in the default address space and Param is declared - // in a non-default address space, perform an implicit address space cast to - // the parameter type. - if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD && - FD->getBuiltinID()) { + // If this expression is a call to a builtin function in HIP compilation, + // allow a pointer-type argument to default address space to be passed as a + // pointer-type parameter to a non-default address space. If Arg is declared + // in the default address space and Param is declared in a non-default + // address space, perform an implicit address space cast to the parameter + // type. + if (getLangOpts().HIP && FD && FD->getBuiltinID()) { for (unsigned Idx = 0; Idx < ArgExprs.size() && Idx < FD->param_size(); ++Idx) { ParmVarDecl *Param = FD->getParamDecl(Idx); diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index 43bcb4f743cfa..d6f70e728be29 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -5658,20 +5658,13 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) { // extension. 
static bool isValidVectorForConditionalCondition(ASTContext &Ctx, QualType CondTy) { - if (!CondTy->isVectorType() && !CondTy->isExtVectorType()) + bool IsSVEVectorType = CondTy->isSveVLSBuiltinType(); + if (!CondTy->isVectorType() && !CondTy->isExtVectorType() && !IsSVEVectorType) return false; const QualType EltTy = - cast(CondTy.getCanonicalType())->getElementType(); - assert(!EltTy->isEnumeralType() && "Vectors cant be enum types"); - return EltTy->isIntegralType(Ctx); -} - -static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx, - QualType CondTy) { - if (!CondTy->isSveVLSBuiltinType()) - return false; - const QualType EltTy = - cast(CondTy.getCanonicalType())->getSveEltType(Ctx); + IsSVEVectorType + ? cast(CondTy.getCanonicalType())->getSveEltType(Ctx) + : cast(CondTy.getCanonicalType())->getElementType(); assert(!EltTy->isEnumeralType() && "Vectors cant be enum types"); return EltTy->isIntegralType(Ctx); } @@ -5683,21 +5676,29 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); QualType CondType = Cond.get()->getType(); - const auto *CondVT = CondType->castAs(); - QualType CondElementTy = CondVT->getElementType(); - unsigned CondElementCount = CondVT->getNumElements(); QualType LHSType = LHS.get()->getType(); - const auto *LHSVT = LHSType->getAs(); QualType RHSType = RHS.get()->getType(); - const auto *RHSVT = RHSType->getAs(); - QualType ResultType; + bool LHSIsVector = LHSType->isVectorType() || LHSType->isSizelessVectorType(); + bool RHSIsVector = RHSType->isVectorType() || RHSType->isSizelessVectorType(); + + auto GetVectorInfo = + [&](QualType Type) -> std::pair { + if (const auto *VT = Type->getAs()) + return std::make_pair(VT->getElementType(), + llvm::ElementCount::getFixed(VT->getNumElements())); + ASTContext::BuiltinVectorTypeInfo VectorInfo = + Context.getBuiltinVectorTypeInfo(Type->castAs()); + return std::make_pair(VectorInfo.ElementType, 
VectorInfo.EC); + }; + auto [CondElementTy, CondElementCount] = GetVectorInfo(CondType); - if (LHSVT && RHSVT) { - if (isa(CondVT) != isa(LHSVT)) { + QualType ResultType; + if (LHSIsVector && RHSIsVector) { + if (CondType->isExtVectorType() != LHSType->isExtVectorType()) { Diag(QuestionLoc, diag::err_conditional_vector_cond_result_mismatch) - << /*isExtVector*/ isa(CondVT); + << /*isExtVector*/ CondType->isExtVectorType(); return {}; } @@ -5708,12 +5709,17 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, return {}; } ResultType = Context.getCommonSugaredType(LHSType, RHSType); - } else if (LHSVT || RHSVT) { - ResultType = CheckVectorOperands( - LHS, RHS, QuestionLoc, /*isCompAssign*/ false, /*AllowBothBool*/ true, - /*AllowBoolConversions*/ false, - /*AllowBoolOperation*/ true, - /*ReportInvalid*/ true); + } else if (LHSIsVector || RHSIsVector) { + if (CondType->isSizelessVectorType()) + ResultType = CheckSizelessVectorOperands(LHS, RHS, QuestionLoc, + /*IsCompAssign*/ false, + ArithConvKind::Conditional); + else + ResultType = CheckVectorOperands( + LHS, RHS, QuestionLoc, /*isCompAssign*/ false, /*AllowBothBool*/ true, + /*AllowBoolConversions*/ false, + /*AllowBoolOperation*/ true, + /*ReportInvalid*/ true); if (ResultType.isNull()) return {}; } else { @@ -5731,24 +5737,33 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, << ResultElementTy; return {}; } - if (CondType->isExtVectorType()) - ResultType = - Context.getExtVectorType(ResultElementTy, CondVT->getNumElements()); - else - ResultType = Context.getVectorType( - ResultElementTy, CondVT->getNumElements(), VectorKind::Generic); - + if (CondType->isExtVectorType()) { + ResultType = Context.getExtVectorType(ResultElementTy, + CondElementCount.getFixedValue()); + } else if (CondType->isSizelessVectorType()) { + ResultType = Context.getScalableVectorType( + ResultElementTy, CondElementCount.getKnownMinValue()); + // There are not scalable vector type 
mappings for all element counts. + if (ResultType.isNull()) { + Diag(QuestionLoc, diag::err_conditional_vector_scalar_type_unsupported) + << ResultElementTy << CondType; + return {}; + } + } else { + ResultType = Context.getVectorType(ResultElementTy, + CondElementCount.getFixedValue(), + VectorKind::Generic); + } LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat); RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat); } - assert(!ResultType.isNull() && ResultType->isVectorType() && + assert(!ResultType.isNull() && + (ResultType->isVectorType() || ResultType->isSizelessVectorType()) && (!CondType->isExtVectorType() || ResultType->isExtVectorType()) && "Result should have been a vector type"); - auto *ResultVectorTy = ResultType->castAs(); - QualType ResultElementTy = ResultVectorTy->getElementType(); - unsigned ResultElementCount = ResultVectorTy->getNumElements(); + auto [ResultElementTy, ResultElementCount] = GetVectorInfo(ResultType); if (ResultElementCount != CondElementCount) { Diag(QuestionLoc, diag::err_conditional_vector_size) << CondType << ResultType; @@ -5767,90 +5782,6 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, return ResultType; } -QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond, - ExprResult &LHS, - ExprResult &RHS, - SourceLocation QuestionLoc) { - LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); - RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); - - QualType CondType = Cond.get()->getType(); - const auto *CondBT = CondType->castAs(); - QualType CondElementTy = CondBT->getSveEltType(Context); - llvm::ElementCount CondElementCount = - Context.getBuiltinVectorTypeInfo(CondBT).EC; - - QualType LHSType = LHS.get()->getType(); - const auto *LHSBT = - LHSType->isSveVLSBuiltinType() ? LHSType->getAs() : nullptr; - QualType RHSType = RHS.get()->getType(); - const auto *RHSBT = - RHSType->isSveVLSBuiltinType() ? 
RHSType->getAs() : nullptr; - - QualType ResultType; - - if (LHSBT && RHSBT) { - // If both are sizeless vector types, they must be the same type. - if (!Context.hasSameType(LHSType, RHSType)) { - Diag(QuestionLoc, diag::err_conditional_vector_mismatched) - << LHSType << RHSType; - return QualType(); - } - ResultType = LHSType; - } else if (LHSBT || RHSBT) { - ResultType = CheckSizelessVectorOperands(LHS, RHS, QuestionLoc, - /*IsCompAssign*/ false, - ArithConvKind::Conditional); - if (ResultType.isNull()) - return QualType(); - } else { - // Both are scalar so splat - QualType ResultElementTy; - LHSType = LHSType.getCanonicalType().getUnqualifiedType(); - RHSType = RHSType.getCanonicalType().getUnqualifiedType(); - - if (Context.hasSameType(LHSType, RHSType)) - ResultElementTy = LHSType; - else - ResultElementTy = UsualArithmeticConversions(LHS, RHS, QuestionLoc, - ArithConvKind::Conditional); - - if (ResultElementTy->isEnumeralType()) { - Diag(QuestionLoc, diag::err_conditional_vector_operand_type) - << ResultElementTy; - return QualType(); - } - - ResultType = Context.getScalableVectorType( - ResultElementTy, CondElementCount.getKnownMinValue()); - - LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat); - RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat); - } - - assert(!ResultType.isNull() && ResultType->isSveVLSBuiltinType() && - "Result should have been a vector type"); - auto *ResultBuiltinTy = ResultType->castAs(); - QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Context); - llvm::ElementCount ResultElementCount = - Context.getBuiltinVectorTypeInfo(ResultBuiltinTy).EC; - - if (ResultElementCount != CondElementCount) { - Diag(QuestionLoc, diag::err_conditional_vector_size) - << CondType << ResultType; - return QualType(); - } - - if (Context.getTypeSize(ResultElementTy) != - Context.getTypeSize(CondElementTy)) { - Diag(QuestionLoc, diag::err_conditional_vector_element_size) - << CondType << ResultType; - return QualType(); - 
} - - return ResultType; -} - QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, @@ -5864,14 +5795,10 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, bool IsVectorConditional = isValidVectorForConditionalCondition(Context, Cond.get()->getType()); - bool IsSizelessVectorConditional = - isValidSizelessVectorForConditionalCondition(Context, - Cond.get()->getType()); - // C++11 [expr.cond]p1 // The first expression is contextually converted to bool. if (!Cond.get()->isTypeDependent()) { - ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional + ExprResult CondRes = IsVectorConditional ? DefaultFunctionArrayLvalueConversion(Cond.get()) : CheckCXXBooleanCondition(Cond.get()); if (CondRes.isInvalid()) @@ -5940,9 +5867,6 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, if (IsVectorConditional) return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc); - if (IsSizelessVectorConditional) - return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc); - // WebAssembly tables are not allowed as conditional LHS or RHS. if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) { Diag(QuestionLoc, diag::err_wasm_table_conditional_expression) diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index 655fa31bbf5c7..6bb1a27d1800c 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -3889,6 +3889,11 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, // Update all declarations of the function to have the deduced return type. 
Context.adjustDeducedFunctionResultType(FD, Deduced); + if (!Deduced->isDependentType() && !Deduced->isRecordType() && + !FD->isFunctionTemplateSpecialization()) + diagnoseIgnoredQualifiers( + diag::warn_qual_return_type, + FD->getDeclaredReturnType().getLocalCVRQualifiers(), FD->getLocation()); return false; } diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index 26693514bb278..e74c41517ecbf 100644 --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -245,15 +245,17 @@ static void sharedInstantiateConstructorDestructorAttr( ExprResult Result = S.SubstExpr(A->getPriority(), TemplateArgs); if (Result.isInvalid()) return; - tempInstPriority = Result.get(); - if (std::optional CE = - tempInstPriority->getIntegerConstantExpr(C)) { - // Consistent with non-templated priority arguments, which must fit in a - // 32-bit unsigned integer. - if (!CE->isIntN(32)) { - S.Diag(tempInstPriority->getExprLoc(), diag::err_ice_too_large) - << toString(*CE, 10, false) << /*Size=*/32 << /*Unsigned=*/1; - return; + if (Result.isUsable()) { + tempInstPriority = Result.get(); + if (std::optional CE = + tempInstPriority->getIntegerConstantExpr(C)) { + // Consistent with non-templated priority arguments, which must fit in a + // 32-bit unsigned integer. + if (!CE->isIntN(32)) { + S.Diag(tempInstPriority->getExprLoc(), diag::err_ice_too_large) + << toString(*CE, 10, false) << /*Size=*/32 << /*Unsigned=*/1; + return; + } } } } diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp index eb8b1352d1be1..eaf95a8371c2f 100644 --- a/clang/lib/Sema/SemaType.cpp +++ b/clang/lib/Sema/SemaType.cpp @@ -5067,8 +5067,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, // cv-qualifiers on return types are pointless except when the type is a // class type in C++. 
if ((T.getCVRQualifiers() || T->isAtomicType()) && + // A dependent type or an undeduced type might later become a class + // type. !(S.getLangOpts().CPlusPlus && - (T->isDependentType() || T->isRecordType()))) { + (T->isRecordType() || T->isDependentType() || + T->isUndeducedAutoType()))) { if (T->isVoidType() && !S.getLangOpts().CPlusPlus && D.getFunctionDefinitionKind() == FunctionDefinitionKind::Definition) { diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp index 9bae12454d2dc..1d55f615de8a9 100644 --- a/clang/lib/Tooling/Tooling.cpp +++ b/clang/lib/Tooling/Tooling.cpp @@ -31,6 +31,7 @@ #include "clang/Frontend/TextDiagnosticPrinter.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/PreprocessorOptions.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "clang/Tooling/ArgumentsAdjusters.h" #include "clang/Tooling/CompilationDatabase.h" @@ -510,8 +511,7 @@ static void injectResourceDir(CommandLineArguments &Args, const char *Argv0, // If there's no override in place add our resource dir. Args = getInsertArgumentAdjuster( - ("-resource-dir=" + CompilerInvocation::GetResourcesPath(Argv0, MainAddr)) - .c_str())(Args, ""); + ("-resource-dir=" + GetResourcesPath(Argv0, MainAddr)).c_str())(Args, ""); } int ClangTool::run(ToolAction *Action) { diff --git a/clang/lib/Tooling/Transformer/SourceCode.cpp b/clang/lib/Tooling/Transformer/SourceCode.cpp index 922dafeddf416..7adceda05ad4f 100644 --- a/clang/lib/Tooling/Transformer/SourceCode.cpp +++ b/clang/lib/Tooling/Transformer/SourceCode.cpp @@ -86,8 +86,12 @@ llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range, return validateRange(Range, SM, /*AllowSystemHeaders=*/false); } -static bool spelledInMacroDefinition(SourceLocation Loc, - const SourceManager &SM) { +// Returns the location of the top-level macro argument that is the spelling for +// the expansion `Loc` is from. 
If `Loc` is spelled in the macro definition, +// returns an invalid `SourceLocation`. +static SourceLocation getMacroArgumentSpellingLoc(SourceLocation Loc, + const SourceManager &SM) { + assert(Loc.isMacroID() && "Location must be in a macro"); while (Loc.isMacroID()) { const auto &Expansion = SM.getSLocEntry(SM.getFileID(Loc)).getExpansion(); if (Expansion.isMacroArgExpansion()) { @@ -95,10 +99,22 @@ static bool spelledInMacroDefinition(SourceLocation Loc, // in a macro expansion. Loc = Expansion.getSpellingLoc(); } else { - return true; + return {}; } } - return false; + return Loc; +} + +static bool spelledInMacroDefinition(CharSourceRange Range, + const SourceManager &SM) { + if (Range.getBegin().isMacroID() && Range.getEnd().isMacroID()) { + // Check whether the range is entirely within a single macro argument. + auto B = getMacroArgumentSpellingLoc(Range.getBegin(), SM); + auto E = getMacroArgumentSpellingLoc(Range.getEnd(), SM); + return B.isInvalid() || B != E; + } + + return Range.getBegin().isMacroID() || Range.getEnd().isMacroID(); } // Returns the expansion char-range of `Loc` if `Loc` is a split token. 
For @@ -158,8 +174,7 @@ static CharSourceRange getRange(const CharSourceRange &EditRange, Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts); } else { auto AdjustedRange = getRangeForSplitTokens(EditRange, SM, LangOpts); - if (spelledInMacroDefinition(AdjustedRange.getBegin(), SM) || - spelledInMacroDefinition(AdjustedRange.getEnd(), SM)) + if (spelledInMacroDefinition(AdjustedRange, SM)) return {}; auto B = SM.getSpellingLoc(AdjustedRange.getBegin()); diff --git a/clang/test/AST/ByteCode/invalid.cpp b/clang/test/AST/ByteCode/invalid.cpp index 6b49cc44d64df..541aa634007e3 100644 --- a/clang/test/AST/ByteCode/invalid.cpp +++ b/clang/test/AST/ByteCode/invalid.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s -// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref,both %s +// RUN: %clang_cc1 -triple x86_64 -fcxx-exceptions -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s +// RUN: %clang_cc1 -triple x86_64 -fcxx-exceptions -std=c++20 -verify=ref,both %s namespace Throw { @@ -111,3 +111,15 @@ namespace InvalidBitCast { struct s myx; int *myy = ((struct s *)&myx.a)->b; } + +namespace InvalidIntPtrRecord { + typedef __SIZE_TYPE__ Size_t; + +#define bufsize ((1LL << (8 * sizeof(Size_t) - 2)) - 256) + + struct S { + short buf[bufsize]; // both-error {{array is too large}} + int a; + }; + Size_t foo() { return (Size_t)(&((struct S *)0)->a); } +} diff --git a/clang/test/AST/ast-dump-APValue-addrlabeldiff.c b/clang/test/AST/ast-dump-APValue-addrlabeldiff.c new file mode 100644 index 0000000000000..481098eabedb9 --- /dev/null +++ b/clang/test/AST/ast-dump-APValue-addrlabeldiff.c @@ -0,0 +1,22 @@ +// Test without serialization: +// RUN: %clang_cc1 -std=c23 -ast-dump %s -ast-dump-filter Test \ +// RUN: | FileCheck --strict-whitespace --match-full-lines %s +// +// Test with serialization: +// RUN: %clang_cc1 -triple x86_64-unknown-unknown -Wno-unused-value 
-std=c23 -emit-pch -o %t %s +// RUN: %clang_cc1 -x c -triple x86_64-unknown-unknown -Wno-unused-value -std=c23 \ +// RUN: -include-pch %t -ast-dump-all -ast-dump-filter Test /dev/null \ +// RUN: | sed -e "s/ //" -e "s/ imported//" \ +// RUN: | FileCheck --strict-whitespace --match-full-lines %s + + +// CHECK: | |-value: AddrLabelDiff &&l2 - &&l1 +int Test(void) { + constexpr char ar = &&l2 - &&l1; +l1: + return 10; +l2: + return 11; +} + + diff --git a/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c deleted file mode 100644 index 3522e2c7e50bf..0000000000000 --- a/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c +++ /dev/null @@ -1,117 +0,0 @@ -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-cir -o %t.cir -Wall -Werror -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-llvm -o %t.ll -Wall -Werror -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-llvm -o %t.ll -Wall -Werror -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw 
-emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG - -// This test mimics clang/test/CodeGen/X86/avx512bw-builtins.c, which eventually -// CIR shall be able to support fully. - -#include - -__mmask32 test_kshiftli_mask32(__mmask32 A) { - // CIR-LABEL: test_kshiftli_mask32 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<32 x !cir.int>) [#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : !s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, #cir.int<14> : !s32i, #cir.int<15> : !s32i, #cir.int<16> : !s32i, #cir.int<17> : !s32i, #cir.int<18> : !s32i, #cir.int<19> : !s32i, #cir.int<20> : !s32i, #cir.int<21> : !s32i, #cir.int<22> : !s32i, #cir.int<23> : !s32i, #cir.int<24> : !s32i, #cir.int<25> : !s32i, #cir.int<26> : !s32i, #cir.int<27> : !s32i, #cir.int<28> : !s32i, #cir.int<29> : !s32i, #cir.int<30> : !s32i, #cir.int<31> : !s32i, #cir.int<32> : !s32i] : !cir.vector<32 x !cir.int> - - // LLVM-LABEL: test_kshiftli_mask32 - // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // LLVM: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> - - // OGCG-LABEL: test_kshiftli_mask32 - // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // OGCG: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> - return _kshiftli_mask32(A, 31); -} - -__mmask32 test_kshiftri_mask32(__mmask32 A) { - // CIR-LABEL: test_kshiftri_mask32 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : 
!cir.vector<32 x !cir.int>) [#cir.int<31> : !s32i, #cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i] : !cir.vector<32 x !cir.int> - - // LLVM-LABEL: test_kshiftri_mask32 - // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // LLVM: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> - - // OGCG-LABEL: test_kshiftri_mask32 - // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // OGCG: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> - return _kshiftri_mask32(A, 31); -} - -__mmask64 test_kshiftli_mask64(__mmask64 A) { - // CIR-LABEL: test_kshiftli_mask64 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : 
!s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> - - // LLVM-LABEL: test_kshiftli_mask64 - // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // LLVM: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> - - // OGCG-LABEL: test_kshiftli_mask64 - // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // OGCG: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> - return _kshiftli_mask64(A, 32); -} - -__mmask64 test_kshiftri_mask64(__mmask64 A) { - // CIR-LABEL: test_kshiftri_mask64 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : 
!s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> - - // LLVM-LABEL: test_kshiftri_mask64 - // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // LLVM: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> - - // OGCG-LABEL: test_kshiftri_mask64 - // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // OGCG: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> - return _kshiftri_mask64(A, 32); -} - -__mmask32 test_kshiftli_mask32_out_of_range(__mmask32 A) { - // CIR-LABEL: test_kshiftli_mask32_out_of_range - // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i - // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr - // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i - // CIR: cir.return [[RES]] : !u32i - - // LLVM-LABEL: test_kshiftli_mask32_out_of_range - // LLVM: store i32 0, ptr [[VAL:%.*]], align 
4 - // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 - // LLVM: ret i32 [[RES]] - - // OGCG-LABEL: test_kshiftli_mask32_out_of_range - // OGCG: ret i32 0 - - return _kshiftli_mask32(A, 33); -} - -__mmask32 test_kshiftri_mask32_out_of_range(__mmask32 A) { - // CIR-LABEL: test_kshiftri_mask32_out_of_range - // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i - // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr - // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i - // CIR: cir.return [[RES]] : !u32i - - // LLVM-LABEL: test_kshiftri_mask32_out_of_range - // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 - // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 - // LLVM: ret i32 [[RES]] - - // OGCG-LABEL: test_kshiftri_mask32_out_of_range - // OGCG: ret i32 0 - - return _kshiftri_mask32(A, 33); -} diff --git a/clang/test/CIR/CodeGen/X86/avx512f-builtins.c b/clang/test/CIR/CodeGen/X86/avx512f-builtins.c deleted file mode 100644 index dc54a87856a7c..0000000000000 --- a/clang/test/CIR/CodeGen/X86/avx512f-builtins.c +++ /dev/null @@ -1,79 +0,0 @@ -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror 
-Wsign-conversion -// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG - -#include - -__m512 test_mm512_undefined(void) { - // CIR-LABEL: _mm512_undefined - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> - // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> - - // LLVM-LABEL: test_mm512_undefined - // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 - // LLVM: ret <16 x float> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined - // OGCG: ret <16 x float> zeroinitializer - return _mm512_undefined(); -} - -__m512 test_mm512_undefined_ps(void) { - // CIR-LABEL: _mm512_undefined_ps - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> - // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> - - // LLVM-LABEL: 
test_mm512_undefined_ps - // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 - // LLVM: ret <16 x float> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_ps - // OGCG: ret <16 x float> zeroinitializer - return _mm512_undefined_ps(); -} - -__m512d test_mm512_undefined_pd(void) { - // CIR-LABEL: _mm512_undefined_pd - // CIR: %{{.*}} = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: cir.return %{{.*}} : !cir.vector<8 x !cir.double> - - // LLVM-LABEL: test_mm512_undefined_pd - // LLVM: store <8 x double> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <8 x double>, ptr %[[A]], align 64 - // LLVM: ret <8 x double> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_pd - // OGCG: ret <8 x double> zeroinitializer - return _mm512_undefined_pd(); -} - -__m512i test_mm512_undefined_epi32(void) { - // CIR-LABEL: _mm512_undefined_epi32 - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<8 x !s64i> - // CIR: cir.return %{{.*}} : !cir.vector<8 x !s64i> - - // LLVM-LABEL: test_mm512_undefined_epi32 - // LLVM: store <8 x i64> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <8 x i64>, ptr %[[A]], align 64 - // LLVM: ret <8 x i64> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_epi32 - // OGCG: ret <8 x i64> zeroinitializer - return _mm512_undefined_epi32(); -} diff --git a/clang/test/CIR/CodeGen/copy-constructor.cpp b/clang/test/CIR/CodeGen/copy-constructor.cpp new file mode 100644 index 0000000000000..be05bd582d6f0 --- /dev/null +++ b/clang/test/CIR/CodeGen/copy-constructor.cpp @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir 
-emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s + +struct HasScalarArrayMember { + int arr[2][2]; + HasScalarArrayMember(const HasScalarArrayMember &); +}; + +HasScalarArrayMember::HasScalarArrayMember(const HasScalarArrayMember &) = default; + +// CIR-LABEL: cir.func dso_local @_ZN20HasScalarArrayMemberC2ERKS_( +// CIR-NEXT: %[[THIS:.*]] = cir.alloca !cir.ptr +// CIR-NEXT: %[[OTHER:.*]] = cir.alloca !cir.ptr +// CIR-NEXT: cir.store %arg0, %[[THIS]] +// CIR-NEXT: cir.store %arg1, %[[OTHER]] +// CIR-NEXT: %[[THIS_LOAD:.*]] = cir.load{{.*}} %[[THIS]] +// CIR-NEXT: %[[THIS_ARR:.*]] = cir.get_member %[[THIS_LOAD]][0] {name = "arr"} +// CIR-NEXT: %[[OTHER_LOAD:.*]] = cir.load{{.*}} %[[OTHER]] +// CIR-NEXT: %[[OTHER_ARR:.*]] = cir.get_member %[[OTHER_LOAD]][0] {name = "arr"} +// CIR-NEXT: cir.copy %[[OTHER_ARR]] to %[[THIS_ARR]] : !cir.ptr x 2>> +// CIR-NEXT: cir.return + +// LLVM-LABEL: define {{.*}} @_ZN20HasScalarArrayMemberC2ERKS_( +// LLVM-SAME: ptr %[[ARG0:.*]], ptr %[[ARG1:.*]]) +// LLVM-NEXT: %[[THIS:.*]] = alloca ptr +// LLVM-NEXT: %[[OTHER:.*]] = alloca ptr +// LLVM-NEXT: store ptr %[[ARG0]], ptr %[[THIS]] +// LLVM-NEXT: store ptr %[[ARG1]], ptr %[[OTHER]] +// LLVM-NEXT: %[[THIS_LOAD:.*]] = load ptr, ptr %[[THIS]] +// LLVM-NEXT: %[[THIS_ARR:.*]] = getelementptr %struct.HasScalarArrayMember, ptr %[[THIS_LOAD]], i32 0, i32 0 +// LLVM-NEXT: %[[OTHER_LOAD:.*]] = load ptr, ptr %[[OTHER]] +// LLVM-NEXT: %[[OTHER_ARR:.*]] = getelementptr %struct.HasScalarArrayMember, ptr %[[OTHER_LOAD]], i32 0, i32 0 +// LLVM-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %[[THIS_ARR]], ptr %[[OTHER_ARR]], i32 16, i1 false) +// LLVM-NEXT: ret void + +// OGCG-LABEL: define {{.*}} @_ZN20HasScalarArrayMemberC2ERKS_( +// OGCG-SAME: ptr {{.*}} %[[ARG0:.*]], ptr {{.*}} %[[ARG1:.*]]) +// 
OGCG-NEXT: entry: +// OGCG-NEXT: %[[THIS:.*]] = alloca ptr +// OGCG-NEXT: %[[OTHER:.*]] = alloca ptr +// OGCG-NEXT: store ptr %[[ARG0]], ptr %[[THIS]] +// OGCG-NEXT: store ptr %[[ARG1]], ptr %[[OTHER]] +// OGCG-NEXT: %[[THIS_LOAD:.*]] = load ptr, ptr %[[THIS]] +// OGCG-NEXT: %[[THIS_ARR:.*]] = getelementptr inbounds nuw %struct.HasScalarArrayMember, ptr %[[THIS_LOAD]], i32 0, i32 0 +// OGCG-NEXT: %[[OTHER_LOAD:.*]] = load ptr, ptr %[[OTHER]] +// OGCG-NEXT: %[[OTHER_ARR:.*]] = getelementptr inbounds nuw %struct.HasScalarArrayMember, ptr %[[OTHER_LOAD]], i32 0, i32 0 +// OGCG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}} %[[THIS_ARR]], ptr {{.*}} %[[OTHER_ARR]], i64 16, i1 false) +// OGCG-NEXT: ret void diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 01e0786fbda71..4843f2433fa64 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -111,6 +111,9 @@ co_invoke_fn co_invoke; // CIR-DAG: ![[VoidPromisse:.*]] = !cir.record::promise_type" padded {!u8i}> // CIR-DAG: ![[IntPromisse:.*]] = !cir.record::promise_type" padded {!u8i}> // CIR-DAG: ![[StdString:.*]] = !cir.record +// CIR-DAG: ![[CoroHandleVoid:.*]] = !cir.record" padded {!u8i}> +// CIR-DAG: ![[CoroHandlePromiseVoid:rec_.*]] = !cir.record::promise_type>" padded {!u8i}> +// CIR-DAG: ![[CoroHandlePromiseInt:rec_.*]] = !cir.record::promise_type>" padded {!u8i}> // CIR-DAG: ![[SuspendAlways:.*]] = !cir.record // CIR: module {{.*}} { @@ -160,6 +163,8 @@ VoidTask silly_task() { // CIR: cir.scope { // CIR: %[[SuspendAlwaysAddr:.*]] = cir.alloca ![[SuspendAlways]], {{.*}} ["ref.tmp0"] {alignment = 1 : i64} +// CIR: %[[CoroHandleVoidAddr:.*]] = cir.alloca ![[CoroHandleVoid]], {{.*}} ["agg.tmp0"] {alignment = 1 : i64} +// CIR: %[[CoroHandlePromiseAddr:.*]] = cir.alloca ![[CoroHandlePromiseVoid]], {{.*}} ["agg.tmp1"] {alignment = 1 : i64} // Effectively execute `coawait promise_type::initial_suspend()` by calling initial_suspend() and 
getting // the suspend_always struct to use for cir.await. Note that we return by-value since we defer ABI lowering @@ -175,8 +180,28 @@ VoidTask silly_task() { // First regions `ready` has a special cir.yield code to veto suspension. // CIR: cir.await(init, ready : { -// CIR: cir.condition({{.*}}) +// CIR: %[[ReadyVeto:.*]] = cir.scope { +// CIR: %[[TmpCallRes:.*]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[SuspendAlwaysAddr]]) +// CIR: cir.yield %[[TmpCallRes:.*]] : !cir.bool +// CIR: } +// CIR: cir.condition(%[[ReadyVeto]]) + +// Second region `suspend` contains the actual suspend logic. +// +// - Start by getting the coroutine handle using from_address(). +// - Implicit convert coroutine handle from task specific promisse +// specialization to a void one. +// - Call suspend_always::await_suspend() passing the handle. +// +// FIXME: add veto support for non-void await_suspends. + // CIR: }, suspend : { +// CIR: %[[FromAddrRes:.*]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%[[CoroFrameAddr]]) +// CIR: cir.store{{.*}} %[[FromAddrRes]], %[[CoroHandlePromiseAddr]] : ![[CoroHandlePromiseVoid]] +// CIR: %[[CoroHandlePromiseReload:.*]] = cir.load{{.*}} %[[CoroHandlePromiseAddr]] +// CIR: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[CoroHandleVoidAddr]], %[[CoroHandlePromiseReload]]) +// CIR: %[[CoroHandleVoidReload:.*]] = cir.load{{.*}} %[[CoroHandleVoidAddr]] : !cir.ptr, ![[CoroHandleVoid]] +// CIR: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[SuspendAlwaysAddr]], %[[CoroHandleVoidReload]]) // CIR: cir.yield // CIR: }, resume : { // CIR: cir.yield @@ -203,11 +228,23 @@ folly::coro::Task byRef(const std::string& s) { // CIR: cir.store {{.*}} %[[RetObj]], %[[IntTaskAddr]] : ![[IntTask]] // CIR: cir.scope { // CIR: %[[SuspendAlwaysAddr:.*]] = cir.alloca ![[SuspendAlways]], {{.*}} ["ref.tmp0"] {alignment = 1 : i64} +// CIR: 
%[[CoroHandleVoidAddr:.*]] = cir.alloca ![[CoroHandleVoid]], {{.*}} ["agg.tmp0"] {alignment = 1 : i64} +// CIR: %[[CoroHandlePromiseAddr:.*]] = cir.alloca ![[CoroHandlePromiseInt]], {{.*}} ["agg.tmp1"] {alignment = 1 : i64} // CIR: %[[Tmp0:.*]] = cir.call @_ZN5folly4coro4TaskIiE12promise_type15initial_suspendEv(%[[IntPromisseAddr]]) // CIR: cir.await(init, ready : { -// CIR: cir.condition({{.*}}) +// CIR: %[[ReadyVeto:.*]] = cir.scope { +// CIR: %[[TmpCallRes:.*]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[SuspendAlwaysAddr]]) +// CIR: cir.yield %[[TmpCallRes:.*]] : !cir.bool +// CIR: } +// CIR: cir.condition(%[[ReadyVeto]]) // CIR: }, suspend : { -// CIR: cir.yield +// CIR: %[[FromAddrRes:.*]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIiE12promise_typeEE12from_addressEPv(%[[CoroFrameAddr:.*]]) +// CIR: cir.store{{.*}} %[[FromAddrRes]], %[[CoroHandlePromiseAddr]] : ![[CoroHandlePromiseInt]] +// CIR: %[[CoroHandlePromiseReload:.*]] = cir.load{{.*}} %[[CoroHandlePromiseAddr]] +// CIR: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIiE12promise_typeEEES_IT_E(%[[CoroHandleVoidAddr]], %[[CoroHandlePromiseReload]]) +// CIR: %[[CoroHandleVoidReload:.*]] = cir.load{{.*}} %[[CoroHandleVoidAddr]] : !cir.ptr, ![[CoroHandleVoid]] +// CIR: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[SuspendAlwaysAddr]], %[[CoroHandleVoidReload]]) +// CIR: cir.yield // CIR: }, resume : { // CIR: cir.yield // CIR: },) diff --git a/clang/test/CIR/CodeGen/count-of.c b/clang/test/CIR/CodeGen/count-of.c new file mode 100644 index 0000000000000..1fd1290c42e6b --- /dev/null +++ b/clang/test/CIR/CodeGen/count-of.c @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck 
--input-file=%t-cir.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG + +unsigned long vla_with_array_element_type_with_const_size() { + long size; + return _Countof(int[5][size]); +} + +// CIR: %[[RET_ADDR:.*]] = cir.alloca !u64i, !cir.ptr, ["__retval"] +// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !s64i, !cir.ptr, ["size"] +// CIR: %[[CONST_5:.*]] = cir.const #cir.int<5> : !u64i +// CIR: cir.store %[[CONST_5]], %[[RET_ADDR]] : !u64i, !cir.ptr +// CIR: %[[RET_VAL:.*]] = cir.load %[[RET_ADDR]] : !cir.ptr, !u64i +// CIR: cir.return %[[RET_VAL]] : !u64i + +// LLVM: %[[RET_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: store i64 5, ptr %[[RET_ADDR]], align 8 +// LLVM: %[[RET_VAL:.*]] = load i64, ptr %[[RET_ADDR]], align 8 +// LLVM: ret i64 %[[RET_VAL]] + +// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8 +// OGCG: ret i64 5 + +unsigned long vla_with_array_element_type_non_const_size() { + long size; + return _Countof(int[size][size]); +} + +// CIR: %[[REET_ADDR:.*]] = cir.alloca !u64i, !cir.ptr, ["__retval"] +// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !s64i, !cir.ptr, ["size"] +// CIR: %[[TMP_SIZE:.*]] = cir.load {{.*}} %[[SIZE_ADDR]] : !cir.ptr, !s64i +// CIR: %[[TMP_SIZE_U64:.*]] = cir.cast integral %[[TMP_SIZE]] : !s64i -> !u64i +// CIR: cir.store %[[TMP_SIZE_U64]], %[[RET_ADDR]] : !u64i, !cir.ptr +// CIR: %[[TMP_RET:.*]] = cir.load %[[RET_ADDR]] : !cir.ptr, !u64i +// CIR: cir.return %[[TMP_RET]] : !u64i + +// LLVM: %[[RET_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[TMP_SIZE:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// LLVM: store i64 %[[TMP_SIZE]], ptr %[[RET_ADDR]], align 8 +// LLVM: %[[TMP_RET:.*]] = load i64, ptr %[[RET_ADDR]], align 8 +// LLVM: ret i64 %[[TMP_RET]] + +// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, 
align 8 +// OGCG: %[[TMP_SIZE:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// OGCG: %[[TMP_SIZE_2:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// OGCG: ret i64 %[[TMP_SIZE]] diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 91380b9bea296..1d06496a85530 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -8,6 +8,39 @@ // We declare anonymous record types to represent lambdas. Rather than trying to // to match the declarations, we establish variables for these when they are used. +auto global_lambda = [](){}; +void use_global_lambda() { + global_lambda(); +} + +// CIR: cir.global "private" internal dso_local @global_lambda = #cir.undef : ![[REC_LAM_GLOBAL_LAMBDA:.*]] {alignment = 1 : i64} +// CIR: cir.func lambda internal private dso_local @_ZNK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr {{.*}}) +// CIR: %[[THIS:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS]] +// CIR: cir.load %[[THIS]] +// +// CIR: cir.func {{.*}} @_Z17use_global_lambdav() +// CIR: %[[LAMBDA:.*]] = cir.get_global @global_lambda : !cir.ptr +// CIR: cir.call @_ZNK3$_0clEv(%[[LAMBDA]]) : (!cir.ptr) -> () + +// LLVM: @global_lambda = internal global %[[REC_LAM_GLOBAL_LAMBDA:.*]] undef, align 1 +// LLVM: define internal void @"_ZNK3$_0clEv"(ptr %[[THIS_ARG:.*]]) +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// +// LLVM: define dso_local void @_Z17use_global_lambdav() +// LLVM: call void @"_ZNK3$_0clEv"(ptr @global_lambda) + +// OGCG: @global_lambda = internal global %[[REC_LAM_GLOBAL_LAMBDA:.*]] undef, align 1 +// OGCG: define dso_local void @_Z17use_global_lambdav() +// OGCG: call void @"_ZNK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) @global_lambda) +// +// OGCG: define internal void @"_ZNK3$_0clEv"(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr 
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] + void fn() { auto a = [](){}; a(); diff --git a/clang/test/CIR/CodeGen/X86/avx-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/avx10_2_512bf16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx10_2_512bf16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx10_2_512bf16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx10_2_512bf16-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/avx10_2bf16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx10_2bf16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx10_2bf16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx10_2bf16-builtins.c diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c new file mode 100644 index 0000000000000..4863ba0bd8848 --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c @@ -0,0 +1,467 @@ +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ 
-flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG + +// This test mimics clang/test/CodeGen/X86/avx512bw-builtins.c, which eventually +// CIR shall be able to support fully. + +#include + +__mmask32 test_kshiftli_mask32(__mmask32 A) { + // CIR-LABEL: test_kshiftli_mask32 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<32 x !cir.int>) [#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : !s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, #cir.int<14> : !s32i, #cir.int<15> : !s32i, #cir.int<16> : !s32i, #cir.int<17> : !s32i, #cir.int<18> : !s32i, #cir.int<19> : !s32i, #cir.int<20> : !s32i, #cir.int<21> : !s32i, #cir.int<22> : !s32i, #cir.int<23> : !s32i, #cir.int<24> : !s32i, #cir.int<25> : !s32i, #cir.int<26> : !s32i, #cir.int<27> : !s32i, #cir.int<28> : !s32i, #cir.int<29> : !s32i, #cir.int<30> : !s32i, #cir.int<31> : !s32i, #cir.int<32> : !s32i] : !cir.vector<32 x !cir.int> + + // LLVM-LABEL: test_kshiftli_mask32 + // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x 
i32> + + // OGCG-LABEL: test_kshiftli_mask32 + // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // OGCG: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> + return _kshiftli_mask32(A, 31); +} + +__mmask32 test_kshiftri_mask32(__mmask32 A) { + // CIR-LABEL: test_kshiftri_mask32 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<32 x !cir.int>) [#cir.int<31> : !s32i, #cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i] : !cir.vector<32 x !cir.int> + + // LLVM-LABEL: test_kshiftri_mask32 + // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> + + // OGCG-LABEL: test_kshiftri_mask32 + // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // OGCG: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> + return _kshiftri_mask32(A, 31); +} + +__mmask64 test_kshiftli_mask64(__mmask64 A) { + // CIR-LABEL: test_kshiftli_mask64 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> + // CIR: %{{.*}} = 
cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> + + // LLVM-LABEL: test_kshiftli_mask64 + // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> + + // OGCG-LABEL: test_kshiftli_mask64 + // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // OGCG: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> + return _kshiftli_mask64(A, 32); +} + +__mmask64 test_kshiftri_mask64(__mmask64 A) { + // CIR-LABEL: 
test_kshiftri_mask64 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> + + // LLVM-LABEL: test_kshiftri_mask64 + // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> + + // OGCG-LABEL: test_kshiftri_mask64 + // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + 
// OGCG: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> + return _kshiftri_mask64(A, 32); +} + +__mmask32 test_kshiftli_mask32_out_of_range(__mmask32 A) { + // CIR-LABEL: test_kshiftli_mask32_out_of_range + // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i + // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr + // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i + // CIR: cir.return [[RES]] : !u32i + + // LLVM-LABEL: test_kshiftli_mask32_out_of_range + // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 + // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 + // LLVM: ret i32 [[RES]] + + // OGCG-LABEL: test_kshiftli_mask32_out_of_range + // OGCG: ret i32 0 + + return _kshiftli_mask32(A, 33); +} + +__mmask32 test_kshiftri_mask32_out_of_range(__mmask32 A) { + // CIR-LABEL: test_kshiftri_mask32_out_of_range + // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i + // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr + // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i + // CIR: cir.return [[RES]] : !u32i + + // LLVM-LABEL: test_kshiftri_mask32_out_of_range + // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 + // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 + // LLVM: ret i32 [[RES]] + + // OGCG-LABEL: test_kshiftri_mask32_out_of_range + // OGCG: ret i32 0 + + return _kshiftri_mask32(A, 33); +} + + +__mmask32 test_kadd_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kadd_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.d" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kadd_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = call <32 x i1> @llvm.x86.avx512.kadd.d(<32 x i1> [[L]], <32 x i1> [[R]]) + // LLVM: bitcast <32 x i1> [[RES]] 
to i32 + + // OGCG-LABEL: _kadd_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: call <32 x i1> @llvm.x86.avx512.kadd.d + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kadd_mask32(A, B); +} + +__mmask64 test_kadd_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kadd_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.q" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kadd_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = call <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1> [[L]], <64 x i1> [[R]]) + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kadd_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: call <64 x i1> @llvm.x86.avx512.kadd.q + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kadd_mask64(A, B); +} + +__mmask32 test_kand_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kand_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kand_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = and <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> [[RES]] to i32 + + // OGCG-LABEL: _kand_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: and <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kand_mask32(A, B); +} + +__mmask64 
test_kand_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kand_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kand_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = and <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kand_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: and <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kand_mask64(A, B); +} + +__mmask32 test_kandn_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kandn_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kandn_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> [[L]], splat (i1 true) + // LLVM: and <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kandn_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: and <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kandn_mask32(A, B); +} + +__mmask64 test_kandn_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kandn_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + 
// CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kandn_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> [[L]], splat (i1 true) + // LLVM: and <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kandn_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: and <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kandn_mask64(A, B); +} + +__mmask32 test_kor_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kor_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: or <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kor_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: or <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kor_mask32(A, B); +} + +__mmask64 test_kor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = 
bitcast i64 %{{.*}} to <64 x i1> + // LLVM: or <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: or <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kor_mask64(A, B); +} + +__mmask32 test_kxor_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kxor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kxor_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kxor_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kxor_mask32(A, B); +} + +__mmask64 test_kxor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kxor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kxor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kxor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kxor_mask64(A, B); +} + +__mmask32 
test_kxnor_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kxnor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kxnor_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[NOT:%.*]] = xor <32 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <32 x i1> [[NOT]], [[R]] + // LLVM: bitcast <32 x i1> [[RES]] to i32 + + // OGCG-LABEL: _kxnor_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + + return _kxnor_mask32(A, B); +} + +__mmask64 test_kxnor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kxnor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kxnor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[NOT:%.*]] = xor <64 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <64 x i1> [[NOT]], [[R]] + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kxnor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + + return _kxnor_mask64(A, B); +} + + +__mmask32 
test_knot_mask32(__mmask32 A) { + // CIR-LABEL: _knot_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _knot_mask32 + // LLVM: bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _knot_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _knot_mask32(A); +} + +__mmask64 test_knot_mask64(__mmask64 A) { + // CIR-LABEL: _knot_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _knot_mask64 + // LLVM: bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _knot_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _knot_mask64(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. 
+ +__mmask32 test_kmov_d(__mmask32 A) { + // CIR-LABEL: test_kmov_d + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: test_kmov_d + // LLVM: bitcast i32 %{{.*}} to <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: test_kmov_d + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + + return __builtin_ia32_kmovd(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. + +__mmask64 test_kmov_q(__mmask64 A) { + // CIR-LABEL: test_kmov_q + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: test_kmov_q + // LLVM: bitcast i64 %{{.*}} to <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: test_kmov_q + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + + return __builtin_ia32_kmovq(A); +} diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c new file mode 100644 index 0000000000000..5d81f666271be --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c @@ -0,0 +1,210 @@ +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: 
FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fno-signed-char -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG + +#include + +__mmask8 test_kadd_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kadd_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.b" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kadd_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[RES:%.*]] = call <8 x i1> @llvm.x86.avx512.kadd.b(<8 x i1> [[L]], <8 x i1> [[R]]) + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kadd_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: call <8 x i1> @llvm.x86.avx512.kadd.b + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kadd_mask8(A, B); +} + +__mmask16 test_kadd_mask16(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _kadd_mask16 + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast 
bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.w" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _kadd_mask16 + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[RES:%.*]] = call <16 x i1> @llvm.x86.avx512.kadd.w(<16 x i1> [[L]], <16 x i1> [[R]]) + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _kadd_mask16 + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: call <16 x i1> @llvm.x86.avx512.kadd.w + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _kadd_mask16(A, B); +} + +__mmask8 test_kand_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kand_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kand_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[RES:%.*]] = and <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kand_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: and <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kand_mask8(A, B); +} + + +__mmask8 test_kandn_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kandn_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kandn_mask8 + // LLVM: [[L:%.*]] = 
bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], splat (i1 true) + // LLVM: and <8 x i1> + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kandn_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: and <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + + return _kandn_mask8(A, B); +} + +__mmask8 test_kor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: or <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: or <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kor_mask8(A, B); +} + +__mmask8 test_kxor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kxor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kxor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kxor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kxor_mask8(A, 
B); +} + +__mmask8 test_kxnor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kxnor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kxnor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[NOT:%.*]] = xor <8 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <8 x i1> [[NOT]], [[R]] + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kxnor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kxnor_mask8(A, B); +} + + +__mmask8 test_knot_mask8(__mmask8 A) { + // CIR-LABEL: _knot_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _knot_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], {{.*}} + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _knot_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _knot_mask8(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. 
+ +__mmask8 test_kmov_b(__mmask8 A) { + // CIR-LABEL: test_kmov_b + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: test_kmov_b + // LLVM: bitcast i8 %{{.*}} to <8 x i1> + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: test_kmov_b + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return __builtin_ia32_kmovb(A); +} diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c new file mode 100644 index 0000000000000..31d6bc3d22408 --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c @@ -0,0 +1,230 @@ +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c 
-flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG + +#include + +__m512 test_mm512_undefined(void) { + // CIR-LABEL: _mm512_undefined + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> + // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> + + // LLVM-LABEL: test_mm512_undefined + // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 + // LLVM: ret <16 x float> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined + // OGCG: ret <16 x float> zeroinitializer + return _mm512_undefined(); +} + +__m512 test_mm512_undefined_ps(void) { + // CIR-LABEL: _mm512_undefined_ps + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> + // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> + + // LLVM-LABEL: test_mm512_undefined_ps + // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 + // LLVM: ret <16 x float> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_ps + // OGCG: ret <16 x float> zeroinitializer + return _mm512_undefined_ps(); +} + +__m512d 
test_mm512_undefined_pd(void) { + // CIR-LABEL: _mm512_undefined_pd + // CIR: %{{.*}} = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: cir.return %{{.*}} : !cir.vector<8 x !cir.double> + + // LLVM-LABEL: test_mm512_undefined_pd + // LLVM: store <8 x double> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <8 x double>, ptr %[[A]], align 64 + // LLVM: ret <8 x double> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_pd + // OGCG: ret <8 x double> zeroinitializer + return _mm512_undefined_pd(); +} + +__m512i test_mm512_undefined_epi32(void) { + // CIR-LABEL: _mm512_undefined_epi32 + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<8 x !s64i> + // CIR: cir.return %{{.*}} : !cir.vector<8 x !s64i> + + // LLVM-LABEL: test_mm512_undefined_epi32 + // LLVM: store <8 x i64> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <8 x i64>, ptr %[[A]], align 64 + // LLVM: ret <8 x i64> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_epi32 + // OGCG: ret <8 x i64> zeroinitializer + return _mm512_undefined_epi32(); +} + +__mmask16 test_mm512_kand(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kand + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kand + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[RES:%.*]] = and <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _mm512_kand + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: and <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kand(A, B); 
+} + +__mmask16 test_mm512_kandn(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kandn + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kandn + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> [[L]], splat (i1 true) + // LLVM: and <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kandn + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: and <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kandn(A, B); +} + +__mmask16 test_mm512_kor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: or <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: or <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kor(A, B); +} + +__mmask16 test_mm512_kxnor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kxnor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: 
cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kxnor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[NOT:%.*]] = xor <16 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <16 x i1> [[NOT]], [[R]] + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _mm512_kxnor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kxnor(A, B); +} + +__mmask16 test_mm512_kxor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kxor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kxor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kxor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kxor(A, B); +} + +__mmask16 test_mm512_knot(__mmask16 A) { + // CIR-LABEL: _mm512_knot + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_knot + // LLVM: bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: 
_mm512_knot + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_knot(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. + +__mmask16 test_kmov_w(__mmask16 A) { + // CIR-LABEL: test_kmov_w + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: test_kmov_w + // LLVM: bitcast i16 %{{.*}} to <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: test_kmov_w + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return __builtin_ia32_kmovw(A); +} diff --git a/clang/test/CIR/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512fp16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx512fp16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx512fp16-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/bmi-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/bmi-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/bmi-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/bmi-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/lzcnt-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/lzcnt-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/lzcnt-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/lzcnt-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/sse-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/sse-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/sse-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/sse-builtins.c diff --git 
a/clang/test/CIR/CodeGen/X86/sse2-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/sse2-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/sse2-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/sse2-builtins.c diff --git a/clang/test/CIR/CodeGen/builtin-fcmp-sse.c b/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin-fcmp-sse.c rename to clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c diff --git a/clang/test/CIR/CodeGen/builtin-isfpclass.c b/clang/test/CIR/CodeGenBuiltins/builtin-isfpclass.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin-isfpclass.c rename to clang/test/CIR/CodeGenBuiltins/builtin-isfpclass.c diff --git a/clang/test/CIR/CodeGen/builtin_bit.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_bit.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_bit.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_bit.cpp diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_call.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_call.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_call.cpp diff --git a/clang/test/CIR/CodeGen/builtin_inline.c b/clang/test/CIR/CodeGenBuiltins/builtin_inline.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin_inline.c rename to clang/test/CIR/CodeGenBuiltins/builtin_inline.c diff --git a/clang/test/CIR/CodeGen/builtin_new_delete.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_new_delete.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_new_delete.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_new_delete.cpp diff --git a/clang/test/CIR/CodeGen/builtin_prefetch.c b/clang/test/CIR/CodeGenBuiltins/builtin_prefetch.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin_prefetch.c rename to clang/test/CIR/CodeGenBuiltins/builtin_prefetch.c diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp 
b/clang/test/CIR/CodeGenBuiltins/builtin_printf.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_printf.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_printf.cpp diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGenBuiltins/builtins-elementwise.c similarity index 100% rename from clang/test/CIR/CodeGen/builtins-elementwise.c rename to clang/test/CIR/CodeGenBuiltins/builtins-elementwise.c diff --git a/clang/test/CIR/CodeGen/builtins-floating-point.c b/clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c similarity index 72% rename from clang/test/CIR/CodeGen/builtins-floating-point.c rename to clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c index 1b7de650662c7..a4307c57b04b6 100644 --- a/clang/test/CIR/CodeGen/builtins-floating-point.c +++ b/clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c @@ -46,3 +46,24 @@ long double expl(long double f) { // LLVM: %{{.*}} = call fp128 @llvm.exp.f128(fp128 %{{.*}}) // OGCG: %{{.*}} = call fp128 @llvm.exp.f128(fp128 %{{.*}}) } + +float exp2f(float f) { + return __builtin_exp2f(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.float + // LLVM: %{{.*}} = call float @llvm.exp2.f32(float %{{.*}}) + // OGCG: %{{.*}} = call float @llvm.exp2.f32(float %{{.*}}) +} + +double my_exp2(double f) { + return __builtin_exp2(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.double + // LLVM: %{{.*}} = call double @llvm.exp2.f64(double %{{.*}}) + // OGCG: %{{.*}} = call double @llvm.exp2.f64(double %{{.*}}) +} + +long double my_exp2l(long double f) { + return __builtin_exp2l(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.long_double + // LLVM: %{{.*}} = call fp128 @llvm.exp2.f128(fp128 %{{.*}}) + // OGCG: %{{.*}} = call fp128 @llvm.exp2.f128(fp128 %{{.*}}) +} diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGenBuiltins/builtins-overflow.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtins-overflow.cpp rename to 
clang/test/CIR/CodeGenBuiltins/builtins-overflow.cpp diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGenBuiltins/builtins.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtins.cpp rename to clang/test/CIR/CodeGenBuiltins/builtins.cpp diff --git a/clang/test/CIR/IR/try-call.cir b/clang/test/CIR/IR/try-call.cir new file mode 100644 index 0000000000000..39db43aee40c1 --- /dev/null +++ b/clang/test/CIR/IR/try-call.cir @@ -0,0 +1,35 @@ +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s + +!s32i = !cir.int + +module { + +cir.func private @division(%a: !s32i, %b: !s32i) -> !s32i + +cir.func @flatten_structure_with_try_call_op() { + %a = cir.const #cir.int<1> : !s32i + %b = cir.const #cir.int<2> : !s32i + %3 = cir.try_call @division(%a, %b) ^normal, ^unwind : (!s32i, !s32i) -> !s32i + ^normal: + cir.br ^end + ^unwind: + cir.br ^end + ^end: + cir.return +} + +// CHECK: cir.func private @division(!s32i, !s32i) -> !s32i + +// CHECK: cir.func @flatten_structure_with_try_call_op() { +// CHECK-NEXT: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i +// CHECK-NEXT: %[[CALL:.*]] = cir.try_call @division(%[[CONST_1]], %[[CONST_2]]) ^[[NORMAL:.*]], ^[[UNWIND:.*]] : (!s32i, !s32i) -> !s32i +// CHECK-NEXT: ^[[NORMAL]]: +// CHECK-NEXT: cir.br ^[[END:.*]] +// CHECK-NEXT: ^[[UNWIND]]: +// CHECK-NEXT: cir.br ^[[END:.*]] +// CHECK-NEXT: ^[[END]]: +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +} diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c index 839eee402d4b8..5fac1403c48f7 100644 --- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c +++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c @@ -24,12 +24,12 @@ // CHECK-LABEL: @test_svrev_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i8( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call 
@llvm.vector.reverse.nxv16i8( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z13test_svrev_s8u10__SVInt8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i8( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv16i8( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svint8_t test_svrev_s8(svint8_t op) MODE_ATTR @@ -39,12 +39,12 @@ svint8_t test_svrev_s8(svint8_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8i16( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8i16( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_s16u11__SVInt16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8i16( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8i16( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svint16_t test_svrev_s16(svint16_t op) MODE_ATTR @@ -54,12 +54,12 @@ svint16_t test_svrev_s16(svint16_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4i32( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv4i32( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_s32u11__SVInt32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4i32( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv4i32( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svint32_t test_svrev_s32(svint32_t op) MODE_ATTR @@ -69,12 +69,12 @@ svint32_t test_svrev_s32(svint32_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2i64( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call 
@llvm.vector.reverse.nxv2i64( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_s64u11__SVInt64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2i64( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv2i64( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svint64_t test_svrev_s64(svint64_t op) MODE_ATTR @@ -84,12 +84,12 @@ svint64_t test_svrev_s64(svint64_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i8( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv16i8( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z13test_svrev_u8u11__SVUint8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i8( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv16i8( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR @@ -99,12 +99,12 @@ svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8i16( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8i16( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_u16u12__SVUint16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8i16( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8i16( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR @@ -114,12 +114,12 @@ svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4i32( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call 
@llvm.vector.reverse.nxv4i32( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_u32u12__SVUint32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4i32( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv4i32( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR @@ -129,12 +129,12 @@ svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2i64( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv2i64( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_u64u12__SVUint64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2i64( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv2i64( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR @@ -144,12 +144,12 @@ svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8f16( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8f16( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_f16u13__SVFloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8f16( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8f16( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR @@ -159,12 +159,12 @@ svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4f32( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] 
= tail call @llvm.vector.reverse.nxv4f32( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_f32u13__SVFloat32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv4f32( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv4f32( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR @@ -174,12 +174,12 @@ svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2f64( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv2f64( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z14test_svrev_f64u13__SVFloat64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv2f64( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv2f64( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR @@ -189,12 +189,12 @@ svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_b8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i1( [[OP:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv16i1( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z13test_svrev_b8u10__SVBool_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv16i1( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv16i1( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svbool_t test_svrev_b8(svbool_t op) MODE_ATTR @@ -249,12 +249,12 @@ svbool_t test_svrev_b64(svbool_t op) MODE_ATTR // CHECK-LABEL: @test_svrev_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8bf16( [[OP:%.*]]) +// CHECK-NEXT: 
[[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8bf16( [[OP:%.*]]) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z15test_svrev_bf16u14__SVBfloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.rev.nxv8bf16( [[OP:%.*]]) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.vector.reverse.nxv8bf16( [[OP:%.*]]) // CPP-CHECK-NEXT: ret [[TMP0]] // svbfloat16_t test_svrev_bf16(svbfloat16_t op) MODE_ATTR diff --git a/clang/test/CodeGen/Sparc/sparcv8-abi.c b/clang/test/CodeGen/Sparc/sparcv8-abi.c index c5faf130890f8..7beddd20e5e4d 100644 --- a/clang/test/CodeGen/Sparc/sparcv8-abi.c +++ b/clang/test/CodeGen/Sparc/sparcv8-abi.c @@ -1,22 +1,52 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "^define |^entry:" --version 6 // RUN: %clang_cc1 -triple sparc-unknown-unknown -emit-llvm %s -o - | FileCheck %s -// CHECK-LABEL: define{{.*}} { float, float } @p(ptr noundef byval({ float, float }) align 4 %a, ptr noundef byval({ float, float }) align 4 %b) #0 { float __complex__ +// CHECK-LABEL: define dso_local { float, float } @p( +// CHECK-SAME: ptr noundef byval({ float, float }) align 4 [[A:%.*]], ptr noundef byval({ float, float }) align 4 [[B:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK: [[ENTRY:.*:]] +// p (float __complex__ a, float __complex__ b) { return 0; } -// CHECK-LABEL: define{{.*}} { double, double } @q(ptr noundef byval({ double, double }) align 8 %a, ptr noundef byval({ double, double }) align 8 %b) #0 { double __complex__ +// CHECK-LABEL: define dso_local { double, double } @q( +// CHECK-SAME: ptr noundef byval({ double, double }) align 8 [[A:%.*]], ptr noundef byval({ double, double }) align 8 [[B:%.*]]) #[[ATTR0]] { +// CHECK: [[ENTRY:.*:]] +// q (double __complex__ a, double __complex__ b) { return 0; } -// CHECK-LABEL: define{{.*}} { i64, i64 } @r(ptr noundef byval({ i64, i64 }) align 8 %a, ptr noundef byval({ i64, i64 }) align 8 %b) #0 { long long __complex__ +// 
CHECK-LABEL: define dso_local { i64, i64 } @r( +// CHECK-SAME: ptr noundef byval({ i64, i64 }) align 8 [[A:%.*]], ptr noundef byval({ i64, i64 }) align 8 [[B:%.*]]) #[[ATTR0]] { +// CHECK: [[ENTRY:.*:]] +// r (long long __complex__ a, long long __complex__ b) { return 0; } + +long double +// CHECK-LABEL: define dso_local void @s( +// CHECK-SAME: ptr dead_on_unwind noalias writable sret(fp128) align 8 [[AGG_RESULT:%.*]], ptr noundef byval(fp128) align 8 [[TMP0:%.*]]) #[[ATTR0]] { +// CHECK: [[ENTRY:.*:]] +// +s(long double a) +{ + return 0; +} + +long double _Complex +// CHECK-LABEL: define dso_local inreg { fp128, fp128 } @t( +// CHECK-SAME: ptr noundef byval({ fp128, fp128 }) align 8 [[A:%.*]]) #[[ATTR0]] { +// CHECK: [[ENTRY:.*:]] +// +t(long double _Complex a) +{ + return 0; +} diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c index 6a884e98e9f3b..d6facfea8962e 100644 --- a/clang/test/CodeGen/X86/avx2-builtins.c +++ b/clang/test/CodeGen/X86/avx2-builtins.c @@ -1191,6 +1191,37 @@ __m256i test_mm256_sign_epi32(__m256i a, __m256i b) { } TEST_CONSTEXPR(match_v8si(_mm256_sign_epi32((__m256i)(__v8si){0xbeef,0xfeed,0xbead,0xdeed, -1,2,-3,4}, (__m256i)(__v8si){0,0,0,0,-1,-1,-1,-1}), 0,0,0,0, 1,-2,3,-4)); +__m256i test_mm256_sll_epi16(__m256i a, __m128i b) { + // CHECK-LABEL: test_mm256_sll_epi16 + // CHECK: call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %{{.*}}, <8 x i16> %{{.*}}) + return _mm256_sll_epi16(a, b); +} +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sll_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); + +__m256i test_mm256_sll_epi32(__m256i a, __m128i b) { + // CHECK-LABEL: test_mm256_sll_epi32 + // CHECK: call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %{{.*}}, <4 x i32> %{{.*}}) + return _mm256_sll_epi32(a, b); +} +TEST_CONSTEXPR(match_v8si(_mm256_sll_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 2, 4, 6, 8, 10, 12, 14)); +TEST_CONSTEXPR(match_v8si(_mm256_sll_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_sll_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_sll_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 1, 1}), 0, 2, 4, 6, 8, 10, 12, 14)); + +__m256i test_mm256_sll_epi64(__m256i a, __m128i b) { + // CHECK-LABEL: test_mm256_sll_epi64 + // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %{{.*}}, <2 x i64> %{{.*}}) + return _mm256_sll_epi64(a, b); +} +TEST_CONSTEXPR(match_v4di(_mm256_sll_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 0}), 0, 2, 4, 6)); +TEST_CONSTEXPR(match_v4di(_mm256_sll_epi64((__m256i)(__v4di){0, 1, 2, 3}, 
(__m128i)(__v2di){64, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4di(_mm256_sll_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 1}), 0, 2, 4, 6)); + __m256i test_mm256_slli_epi16(__m256i a) { // CHECK-LABEL: test_mm256_slli_epi16 // CHECK: call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %{{.*}}, i32 %{{.*}}) @@ -1283,12 +1314,22 @@ __m256i test_mm256_sra_epi16(__m256i a, __m128i b) { // CHECK: call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm256_sra_epi16(a, b); } +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), -16384, 16383, -2, -1, -1, 0, 0, 1, -16384, 16383, -2, -1, -1, 0, 0, 1)); +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_sra_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), -16384, 16383, -2, -1, -1, 0, 0, 1, -16384, 16383, -2, -1, -1, 0, 0, 1)); __m256i 
test_mm256_sra_epi32(__m256i a, __m128i b) { // CHECK-LABEL: test_mm256_sra_epi32 // CHECK: call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm256_sra_epi32(a, b); } +TEST_CONSTEXPR(match_v8si(_mm256_sra_epi32((__m256i)(__v8si){-32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v4si){1, 0, 0, 0}), -16384, 16383, -2, -1, -1, 0, 0, 1)); +TEST_CONSTEXPR(match_v8si(_mm256_sra_epi32((__m256i)(__v8si){-32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v4si){32, 0, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_sra_epi32((__m256i)(__v8si){-32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v4si){0, 1, 0, 0}), -1, 0, -1, -1, -1, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_sra_epi32((__m256i)(__v8si){-32768, 32767, -3, -2, -1, 0, 1, 2}, (__m128i)(__v4si){1, 0, 1, 1}), -16384, 16383, -2, -1, -1, 0, 0, 1)); __m256i test_mm256_srai_epi16(__m256i a) { // CHECK-LABEL: test_mm256_srai_epi16 @@ -1335,18 +1376,31 @@ __m256i test_mm256_srl_epi16(__m256i a, __m128i b) { // CHECK: call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm256_srl_epi16(a, b); } +TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)); +TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); 
+TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16hi(_mm256_srl_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)); __m256i test_mm256_srl_epi32(__m256i a, __m128i b) { // CHECK-LABEL: test_mm256_srl_epi32 // CHECK:call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm256_srl_epi32(a, b); } +TEST_CONSTEXPR(match_v8si(_mm256_srl_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 1, 1, 2, 2, 3, 3)); +TEST_CONSTEXPR(match_v8si(_mm256_srl_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_srl_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8si(_mm256_srl_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 1, 1}), 0, 0, 1, 1, 2, 2, 3, 3)); __m256i test_mm256_srl_epi64(__m256i a, __m128i b) { // CHECK-LABEL: test_mm256_srl_epi64 // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %{{.*}}, <2 x i64> %{{.*}}) return _mm256_srl_epi64(a, b); } +TEST_CONSTEXPR(match_v4di(_mm256_srl_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 0}), 0, 0, 1, 1)); +TEST_CONSTEXPR(match_v4di(_mm256_srl_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){64, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4di(_mm256_srl_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 1}), 0, 0, 1, 1)); __m256i test_mm256_srli_epi16(__m256i a) { // CHECK-LABEL: test_mm256_srli_epi16 diff --git a/clang/test/CodeGen/X86/avx512bf16-builtins.c b/clang/test/CodeGen/X86/avx512bf16-builtins.c index 3f544d387f7aa..556c662f13cc0 
100644 --- a/clang/test/CodeGen/X86/avx512bf16-builtins.c +++ b/clang/test/CodeGen/X86/avx512bf16-builtins.c @@ -9,6 +9,7 @@ // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s #include +#include "builtin_test_helpers.h" float test_mm_cvtsbh_ss(__bf16 A) { // CHECK-LABEL: test_mm_cvtsbh_ss @@ -16,6 +17,7 @@ float test_mm_cvtsbh_ss(__bf16 A) { // CHECK: ret float %{{.*}} return _mm_cvtsbh_ss(A); } +TEST_CONSTEXPR(_mm_cvtsbh_ss(-1.0f) == -1.0f); __m512bh test_mm512_cvtne2ps_pbh(__m512 A, __m512 B) { // CHECK-LABEL: test_mm512_cvtne2ps_pbh @@ -79,23 +81,23 @@ __m512 test_mm512_mask_dpbf16_ps(__m512 D, __m512bh A, __m512bh B, __mmask16 U) __m512 test_mm512_cvtpbh_ps(__m256bh A) { // CHECK-LABEL: test_mm512_cvtpbh_ps - // CHECK: sext <16 x i16> %{{.*}} to <16 x i32> - // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: fpext <16 x bfloat> %{{.*}} to <16 x float> return _mm512_cvtpbh_ps(A); } +TEST_CONSTEXPR(match_m512(_mm512_cvtpbh_ps((__m256bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f, -128.0f, -0.5f, 0.25f, -0.125f, -4.0f, 2.0f, -1.0f, 0.0f}), -0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f, -128.0f, -0.5f, 0.25f, -0.125f, -4.0f, 2.0f, -1.0f, 0.0f)); __m512 test_mm512_maskz_cvtpbh_ps(__mmask16 M, __m256bh A) { // CHECK-LABEL: test_mm512_maskz_cvtpbh_ps - // CHECK: sext <16 x i16> %{{.*}} to <16 x i32> - // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} - // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: fpext <16 x bfloat> %{{.*}} to <16 x float> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_maskz_cvtpbh_ps(M, A); } +TEST_CONSTEXPR(match_m512(_mm512_maskz_cvtpbh_ps(0xA753, (__m256bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, 
-32.0f, 64.0f, -128.0f, -0.5f, 0.25f, -0.125f, -4.0f, 2.0f, -1.0f, 0.0f}), -0.0f, 1.0f, 0.0f, 0.0f, -8.0f, 0.0f, -32.0f, 0.0f, -128.0f, -0.5f, 0.25f, 0.0f, 0.0f, 2.0f, 0.0f, 0.0f)); __m512 test_mm512_mask_cvtpbh_ps(__m512 S, __mmask16 M, __m256bh A) { // CHECK-LABEL: test_mm512_mask_cvtpbh_ps - // CHECK: sext <16 x i16> %{{.*}} to <16 x i32> - // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}}) - // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} + // CHECK: fpext <16 x bfloat> %{{.*}} to <16 x float> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_mask_cvtpbh_ps(S, M, A); } +TEST_CONSTEXPR(match_m512(_mm512_mask_cvtpbh_ps((__m512){ 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f }, 0xA753, (__m256bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f, -128.0f, -0.5f, 0.25f, -0.125f, -4.0f, 2.0f, -1.0f, 0.0f}), -0.0f, 1.0f, 99.0f, 99.0f, -8.0f, 99.0f, -32.0f, 99.0f, -128.0f, -0.5f, 0.25f, 99.0f, 99.0f, 2.0f, 99.0f, 0.0f)); diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c index 091d0c40f0ffa..c9c30dab389db 100644 --- a/clang/test/CodeGen/X86/avx512bw-builtins.c +++ b/clang/test/CodeGen/X86/avx512bw-builtins.c @@ -2297,8 +2297,15 @@ TEST_CONSTEXPR(match_v32hi(_mm512_maskz_sllv_epi16(0xB120676B, (__m512i)(__v32hi __m512i test_mm512_sll_epi16(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sll_epi16 // CHECK: @llvm.x86.avx512.psll.w.512 - return _mm512_sll_epi16(__A, __B); + return _mm512_sll_epi16(__A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 
56, 58, 60, 62)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){17, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_sll_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62)); 
__m512i test_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_sll_epi16 @@ -2311,8 +2318,10 @@ __m512i test_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sll_epi16 // CHECK: @llvm.x86.avx512.psll.w.512 // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} - return _mm512_maskz_sll_epi16(__U, __A, __B); + return _mm512_maskz_sll_epi16(__U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_sll_epi16((__m512i)(__v32hi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A5A5A, (__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 99, 2, 99, 6, 8, 99, 12, 99, 99, 18, 99, 22, 24, 99, 28, 99, 99, 34, 99, 38, 40, 99, 44, 99, 99, 50, 99, 54, 56, 99, 60, 99)); +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_sll_epi16(0xA5A5A5A5, (__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 4, 0, 0, 10, 0, 14, 16, 0, 20, 0, 0, 26, 0, 30, 32, 0, 36, 0, 0, 42, 0, 46, 48, 0, 52, 0, 0, 58, 0, 62)); __m512i test_mm512_slli_epi16(__m512i __A) { // CHECK-LABEL: test_mm512_slli_epi16 @@ -2422,8 +2431,15 @@ TEST_CONSTEXPR(match_v32hi(_mm512_maskz_srav_epi16(0xB120676B, (__m512i)(__v32hi __m512i test_mm512_sra_epi16(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sra_epi16 // CHECK: @llvm.x86.avx512.psra.w.512 - return _mm512_sra_epi16(__A, __B); + return _mm512_sra_epi16(__A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 
0, 0}), 0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 18, -19, 20, -21, 22, -23, 24, -25, 26, -27, 28, -29, 30, -31)); +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){17, 0, 0, 0, 0, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); 
+TEST_CONSTEXPR(match_v32hi(_mm512_sra_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 18, -19, 20, -21, 22, -23, 24, -25, 26, -27, 28, -29, 30, -31)); __m512i test_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_sra_epi16 @@ -2436,8 +2452,10 @@ __m512i test_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sra_epi16 // CHECK: @llvm.x86.avx512.psra.w.512 // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} - return _mm512_maskz_sra_epi16(__U, __A, __B); + return _mm512_maskz_sra_epi16(__U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_sra_epi16((__m512i)(__v32hi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A5A5A, (__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 99, -1, 99, -3, 4, 99, 6, 99, 99, -9, 99, -11, 12, 99, 14, 99, 99, -17, 99, -19, 20, 99, 22, 99, 99, -25, 99, -27, 28, 99, 30, 99)); +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_sra_epi16(0xA5A5A5A5, (__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 2, 0, 0, -5, 0, -7, 8, 0, 10, 0, 0, -13, 0, -15, 16, 0, 18, 0, 0, -21, 0, -23, 24, 0, 26, 0, 0, -29, 0, -31)); __m512i test_mm512_srai_epi16(__m512i __A) { // CHECK-LABEL: test_mm512_srai_epi16 @@ -2485,8 +2503,15 @@ __m512i test_mm512_maskz_srai_epi16_2(__mmask32 __U, __m512i __A, unsigned int _ __m512i 
test_mm512_srl_epi16(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_srl_epi16 // CHECK: @llvm.x86.avx512.psrl.w.512 - return _mm512_srl_epi16(__A, __B); + return _mm512_srl_epi16(__A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 32767, 2, 32765, 4, 32763, 6, 32761, 8, 32759, 10, 32757, 12, 32755, 14, 32753, 16, 32751, 18, 32749, 20, 32747, 22, 32745, 24, 32743, 26, 32741, 28, 32739, 30, 32737)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){17, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v32hi(_mm512_srl_epi16((__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0, 32767, 2, 32765, 4, 32763, 6, 32761, 8, 32759, 10, 32757, 12, 32755, 14, 32753, 16, 32751, 18, 32749, 20, 32747, 22, 32745, 24, 32743, 26, 32741, 28, 32739, 30, 32737)); __m512i test_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_srl_epi16 @@ -2499,8 +2524,10 @@ __m512i test_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_srl_epi16 // CHECK: @llvm.x86.avx512.psrl.w.512 // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} - return _mm512_maskz_srl_epi16(__U, __A, __B); + return _mm512_maskz_srl_epi16(__U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_srl_epi16((__m512i)(__v32hi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A5A5A, (__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, -42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 99, 32767, 99, 32765, 4, 99, 6, 99, 99, 32759, 99, 32757, 12, 99, 14, 99, 99, 32751, 99, 32749, 20, 99, 22, 99, 99, 32743, 99, 32741, 28, 99, 30, 99)); +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_srl_epi16(0xA5A5A5A5, (__m512i)(__v32hi){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30, 32, -34, 36, -38, 40, 
-42, 44, -46, 48, -50, 52, -54, 56, -58, 60, -62}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 2, 0, 0, 32763, 0, 32761, 8, 0, 10, 0, 0, 32755, 0, 32753, 16, 0, 18, 0, 0, 32747, 0, 32745, 24, 0, 26, 0, 0, 32739, 0, 32737)); __m512i test_mm512_srli_epi16(__m512i __A) { // CHECK-LABEL: test_mm512_srli_epi16 diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c index a148940033642..6401a0e55a83b 100644 --- a/clang/test/CodeGen/X86/avx512f-builtins.c +++ b/clang/test/CodeGen/X86/avx512f-builtins.c @@ -6282,8 +6282,13 @@ __m512i test_mm512_maskz_srai_epi64_2(__mmask8 __U, __m512i __A, unsigned int __ __m512i test_mm512_sll_epi32(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sll_epi32 // CHECK: @llvm.x86.avx512.psll.d.512 - return _mm512_sll_epi32(__A, __B); + return _mm512_sll_epi32(__A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_sll_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); +TEST_CONSTEXPR(match_v16si(_mm512_sll_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_sll_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){33, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_sll_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_sll_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){1, 0, 1, 1}), 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); __m512i test_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: 
test_mm512_mask_sll_epi32 @@ -6296,14 +6301,20 @@ __m512i test_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sll_epi32 // CHECK: @llvm.x86.avx512.psll.d.512 // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} - return _mm512_maskz_sll_epi32(__U, __A, __B); + return _mm512_maskz_sll_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_mask_sll_epi32((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A, (__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){1, 0, 0, 0}), 99, 2, 99, 6, 8, 99, 12, 99, 99, 18, 99, 22, 24, 99, 28, 99)); +TEST_CONSTEXPR(match_v16si(_mm512_maskz_sll_epi32(0xA5A5, (__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 4, 0, 0, 10, 0, 14, 16, 0, 20, 0, 0, 26, 0, 30)); __m512i test_mm512_sll_epi64(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sll_epi64 // CHECK: @llvm.x86.avx512.psll.q.512 - return _mm512_sll_epi64(__A, __B); + return _mm512_sll_epi64(__A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_sll_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){1, 0}), 0, 2, 4, 6, 8, 10, 12, 14)); +TEST_CONSTEXPR(match_v8di(_mm512_sll_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){64, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8di(_mm512_sll_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){65, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8di(_mm512_sll_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){1, 1}), 0, 2, 4, 6, 8, 10, 12, 14)); __m512i test_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_sll_epi64 @@ -6316,8 +6327,10 @@ __m512i test_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sll_epi64 // CHECK: @llvm.x86.avx512.psll.q.512 
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} - return _mm512_maskz_sll_epi64(__U, __A, __B); + return _mm512_maskz_sll_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_mask_sll_epi64((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){1, 0}), 99, 2, 99, 6, 8, 99, 12, 99)); +TEST_CONSTEXPR(match_v8di(_mm512_maskz_sll_epi64(0xA5, (__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v2di){1, 0}), 0, 0, 4, 0, 0, 10, 0, 14)); __m512i test_mm512_sllv_epi32(__m512i __X, __m512i __Y) { // CHECK-LABEL: test_mm512_sllv_epi32 @@ -6368,8 +6381,13 @@ TEST_CONSTEXPR(match_v8di(_mm512_maskz_sllv_epi64(0xE4, (__m512i)(__v8di){ 16, - __m512i test_mm512_sra_epi32(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sra_epi32 // CHECK: @llvm.x86.avx512.psra.d.512 - return _mm512_sra_epi32(__A, __B); + return _mm512_sra_epi32(__A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_sra_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15)); +TEST_CONSTEXPR(match_v16si(_mm512_sra_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){32, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v16si(_mm512_sra_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){33, 0, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v16si(_mm512_sra_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){0, 1, 0, 0}), 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v16si(_mm512_sra_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 1, 1}), 0, 
-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15)); __m512i test_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_sra_epi32 @@ -6382,14 +6400,20 @@ __m512i test_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sra_epi32 // CHECK: @llvm.x86.avx512.psra.d.512 // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} - return _mm512_maskz_sra_epi32(__U, __A, __B); + return _mm512_maskz_sra_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_mask_sra_epi32((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A, (__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 99, -1, 99, -3, 4, 99, 6, 99, 99, -9, 99, -11, 12, 99, 14, 99)); +TEST_CONSTEXPR(match_v16si(_mm512_maskz_sra_epi32(0xA5A5, (__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 2, 0, 0, -5, 0, -7, 8, 0, 10, 0, 0, -13, 0, -15)); __m512i test_mm512_sra_epi64(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_sra_epi64 // CHECK: @llvm.x86.avx512.psra.q.512 - return _mm512_sra_epi64(__A, __B); + return _mm512_sra_epi64(__A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_sra_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 0, -1, 2, -3, 4, -5, 6, -7)); +TEST_CONSTEXPR(match_v8di(_mm512_sra_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){64, 0}), 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v8di(_mm512_sra_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){65, 0}), 0, -1, 0, -1, 0, -1, 0, -1)); +TEST_CONSTEXPR(match_v8di(_mm512_sra_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 1}), 0, -1, 2, -3, 4, -5, 6, -7)); __m512i test_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, 
__m128i __B) { // CHECK-LABEL: test_mm512_mask_sra_epi64 @@ -6402,8 +6426,10 @@ __m512i test_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_sra_epi64 // CHECK: @llvm.x86.avx512.psra.q.512 // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} - return _mm512_maskz_sra_epi64(__U, __A, __B); + return _mm512_maskz_sra_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_mask_sra_epi64((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 99, -1, 99, -3, 4, 99, 6, 99)); +TEST_CONSTEXPR(match_v8di(_mm512_maskz_sra_epi64(0xA5, (__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 0, 0, 2, 0, 0, -5, 0, -7)); __m512i test_mm512_srav_epi32(__m512i __X, __m512i __Y) { // CHECK-LABEL: test_mm512_srav_epi32 @@ -6454,8 +6480,13 @@ TEST_CONSTEXPR(match_v8di(_mm512_maskz_srav_epi64(0xE4, (__m512i)(__v8di){ 16, - __m512i test_mm512_srl_epi32(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_srl_epi32 // CHECK: @llvm.x86.avx512.psrl.d.512 - return _mm512_srl_epi32(__A, __B); + return _mm512_srl_epi32(__A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_srl_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 2147483647, 2, 2147483645, 4, 2147483643, 6, 2147483641, 8, 2147483639, 10, 2147483637, 12, 2147483635, 14, 2147483633)); +TEST_CONSTEXPR(match_v16si(_mm512_srl_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_srl_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){33, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_srl_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, 
-14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v16si(_mm512_srl_epi32((__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 1, 1}), 0, 2147483647, 2, 2147483645, 4, 2147483643, 6, 2147483641, 8, 2147483639, 10, 2147483637, 12, 2147483635, 14, 2147483633)); __m512i test_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_srl_epi32 @@ -6468,14 +6499,19 @@ __m512i test_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_srl_epi32 // CHECK: @llvm.x86.avx512.psrl.d.512 // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} - return _mm512_maskz_srl_epi32(__U, __A, __B); + return _mm512_maskz_srl_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v16si(_mm512_mask_srl_epi32((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, 0x5A5A, (__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 99, 2147483647, 99, 2147483645, 4, 99, 6, 99, 99, 2147483639, 99, 2147483637, 12, 99, 14, 99)); +TEST_CONSTEXPR(match_v16si(_mm512_maskz_srl_epi32(0xA5A5, (__m512i)(__v16si){0, -2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24, -26, 28, -30}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 2, 0, 0, 2147483643, 0, 2147483641, 8, 0, 10, 0, 0, 2147483635, 0, 2147483633)); __m512i test_mm512_srl_epi64(__m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_srl_epi64 // CHECK: @llvm.x86.avx512.psrl.q.512 - return _mm512_srl_epi64(__A, __B); + return _mm512_srl_epi64(__A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_srl_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 0, 9223372036854775807, 2, 9223372036854775805, 4, 9223372036854775803, 6, 9223372036854775801)); 
+TEST_CONSTEXPR(match_v8di(_mm512_srl_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){64, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8di(_mm512_srl_epi64((__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 1}), 0, 9223372036854775807, 2, 9223372036854775805, 4, 9223372036854775803, 6, 9223372036854775801)); __m512i test_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_mask_srl_epi64 @@ -6488,8 +6524,10 @@ __m512i test_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) { // CHECK-LABEL: test_mm512_maskz_srl_epi64 // CHECK: @llvm.x86.avx512.psrl.q.512 // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} - return _mm512_maskz_srl_epi64(__U, __A, __B); + return _mm512_maskz_srl_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v8di(_mm512_mask_srl_epi64((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 99, 9223372036854775807, 99, 9223372036854775805, 4, 99, 6, 99)); +TEST_CONSTEXPR(match_v8di(_mm512_maskz_srl_epi64(0xA5, (__m512i)(__v8di){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v2di){1, 0}), 0, 0, 2, 0, 0, 9223372036854775803, 0, 9223372036854775801)); __m512i test_mm512_srlv_epi32(__m512i __X, __m512i __Y) { // CHECK-LABEL: test_mm512_srlv_epi32 @@ -6892,6 +6930,7 @@ __m512 test_mm512_shuffle_f32x4(__m512 __A, __m512 __B) { // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> return _mm512_shuffle_f32x4(__A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_shuffle_f32x4(((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 13.0f, 14.0f, 15.0f, 16.0f, 13.0f, 14.0f, 15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512 
test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { // CHECK-LABEL: test_mm512_mask_shuffle_f32x4 @@ -6899,6 +6938,7 @@ __m512 test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m5 // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_mask_shuffle_f32x4(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_mask_shuffle_f32x4(((__m512){100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f, 700.0f, 800.0f, 900.0f, 1000.0f, 1100.0f, 1200.0f, 1300.0f, 1400.0f, 1500.0f, 1600.0f}), 0b1111111111111110, ((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 100.0f, 14.0f, 15.0f, 16.0f, 13.0f, 14.0f, 15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_f32x4 @@ -6906,12 +6946,14 @@ __m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) { // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_maskz_shuffle_f32x4(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_maskz_shuffle_f32x4(0b1111111111110111, ((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 13.0f, 14.0f, 15.0f, 0.0f, 13.0f, 14.0f, 15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512d test_mm512_shuffle_f64x2(__m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_shuffle_f64x2 // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> return _mm512_shuffle_f64x2(__A, __B, 4); } 
+TEST_CONSTEXPR(match_m512d(_mm512_shuffle_f64x2(((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 1.0, 2.0, 7.0, 8.0, 50.0, 60.0, 50.0, 60.0)); __m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_mask_shuffle_f64x2 @@ -6919,6 +6961,7 @@ __m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_mask_shuffle_f64x2(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512d(_mm512_mask_shuffle_f64x2(((__m512d){100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0}), 0b11110000, ((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 100.0, 200.0, 300.0, 400.0, 50.0, 60.0, 50.0, 60.0)); __m512d test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_f64x2 @@ -6926,12 +6969,15 @@ __m512d test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_maskz_shuffle_f64x2(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512d(_mm512_maskz_shuffle_f64x2(0b11110100, ((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 0.0, 0.0, 7.0, 0.0, 50.0, 60.0, 50.0, 60.0)); __m512i test_mm512_shuffle_i32x4(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_shuffle_i32x4 // CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> return _mm512_shuffle_i32x4(__A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_shuffle_i32x4(((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 3, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 30, 
40)); + __m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_mask_shuffle_i32x4 @@ -6939,6 +6985,7 @@ __m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, _ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} return _mm512_mask_shuffle_i32x4(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_mask_shuffle_i32x4(((__m512i)(__v16si){100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600}), 0b1111111111111011, ((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 300, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 30, 40)); __m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_i32x4 @@ -6946,12 +6993,14 @@ __m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} return _mm512_maskz_shuffle_i32x4(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_maskz_shuffle_i32x4(0b1011111111111111, ((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 3, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 0, 40)); __m512i test_mm512_shuffle_i64x2(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_shuffle_i64x2 // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> return _mm512_shuffle_i64x2(__A, __B, 4); } +TEST_CONSTEXPR(match_m512i(_mm512_shuffle_i64x2(((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 6, 3, 4, 10, 20, 70, 80)); __m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: 
test_mm512_mask_shuffle_i64x2 @@ -6959,6 +7008,7 @@ __m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} return _mm512_mask_shuffle_i64x2(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512i(_mm512_mask_shuffle_i64x2(((__m512i){100, 200, 300, 400, 500, 600, 700, 800}), 0b11111101, ((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 200, 3, 4, 10, 20, 70, 80)); __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_i64x2 @@ -6966,6 +7016,7 @@ __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} return _mm512_maskz_shuffle_i64x2(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512i(_mm512_maskz_shuffle_i64x2(0b00111101, ((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 0, 3, 4, 10, 20, 0, 0)); __m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) { // CHECK-LABEL: test_mm512_shuffle_pd diff --git a/clang/test/CodeGen/X86/avx512vbmi-builtins.c b/clang/test/CodeGen/X86/avx512vbmi-builtins.c index 7d506db92faeb..854cc095e58ba 100644 --- a/clang/test/CodeGen/X86/avx512vbmi-builtins.c +++ b/clang/test/CodeGen/X86/avx512vbmi-builtins.c @@ -207,22 +207,149 @@ __m512i test_mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M, __m512i __A return _mm512_mask_permutexvar_epi8(__W, __M, __A, __B); } +__m512i test_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) { + // CHECK-LABEL: test_mm512_multishift_epi64_epi8 + // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}) + return _mm512_multishift_epi64_epi8(__X, __Y); +} + +TEST_CONSTEXPR(match_v64qu( + _mm512_multishift_epi64_epi8( + (__m512i)(__v64qu){ + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 
16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m512i)(__v64qu){ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78}), + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78)); + +TEST_CONSTEXPR(match_v64qu( + _mm512_multishift_epi64_epi8( + (__m512i)(__v64qu){ + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4}, + (__m512i)(__v64qu){ + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, + 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE}), + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 
0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21)); + __m512i test_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y) { // CHECK-LABEL: test_mm512_mask_multishift_epi64_epi8 // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}) // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} - return _mm512_mask_multishift_epi64_epi8(__W, __M, __X, __Y); + return _mm512_mask_multishift_epi64_epi8(__W, __M, __X, __Y); } +TEST_CONSTEXPR(match_v64qu( + _mm512_mask_multishift_epi64_epi8( + (__m512i)(__v64qu){ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + 0xAAAAAAAAAAAAAAAAULL, + (__m512i)(__v64qu){ + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m512i)(__v64qu){ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78}), + 0xFF, 0x02, 0xFF, 0x04, 0xFF, 0x06, 0xFF, 0x08, + 0xFF, 0x12, 0xFF, 0x14, 0xFF, 0x16, 0xFF, 0x18, + 0xFF, 0x22, 0xFF, 0x24, 0xFF, 0x26, 0xFF, 0x28, + 0xFF, 0x32, 0xFF, 0x34, 0xFF, 0x36, 
0xFF, 0x38, + 0xFF, 0x42, 0xFF, 0x44, 0xFF, 0x46, 0xFF, 0x48, + 0xFF, 0x52, 0xFF, 0x54, 0xFF, 0x56, 0xFF, 0x58, + 0xFF, 0x62, 0xFF, 0x64, 0xFF, 0x66, 0xFF, 0x68, + 0xFF, 0x72, 0xFF, 0x74, 0xFF, 0x76, 0xFF, 0x78)); + __m512i test_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) { // CHECK-LABEL: test_mm512_maskz_multishift_epi64_epi8 // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}) // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} - return _mm512_maskz_multishift_epi64_epi8(__M, __X, __Y); + return _mm512_maskz_multishift_epi64_epi8(__M, __X, __Y); } -__m512i test_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) { - // CHECK-LABEL: test_mm512_multishift_epi64_epi8 - // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}) - return _mm512_multishift_epi64_epi8(__X, __Y); -} +TEST_CONSTEXPR(match_v64qu( + _mm512_maskz_multishift_epi64_epi8( + 0x5555555555555555ULL, + (__m512i)(__v64qu){ + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m512i)(__v64qu){ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78}), + 0x01, 0, 0x03, 0, 0x05, 0, 0x07, 0, + 0x11, 0, 0x13, 0, 0x15, 0, 0x17, 0, + 0x21, 0, 0x23, 0, 0x25, 0, 0x27, 0, + 0x31, 0, 0x33, 0, 0x35, 0, 0x37, 0, + 0x41, 0, 0x43, 0, 0x45, 0, 0x47, 0, + 0x51, 0, 0x53, 0, 0x55, 0, 0x57, 0, + 0x61, 0, 0x63, 0, 0x65, 0, 0x67, 0, + 0x71, 
0, 0x73, 0, 0x75, 0, 0x77, 0)); diff --git a/clang/test/CodeGen/X86/avx512vbmivl-builtin.c b/clang/test/CodeGen/X86/avx512vbmivl-builtin.c index 49b7a1a721195..76a11c3ce406e 100644 --- a/clang/test/CodeGen/X86/avx512vbmivl-builtin.c +++ b/clang/test/CodeGen/X86/avx512vbmivl-builtin.c @@ -162,43 +162,125 @@ TEST_CONSTEXPR(match_v32qu( 216, 109, 218, 110, 220, 111, 222, 112, 224, 113, 226, 114, 228, 115, 230, 116)); +__m128i test_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) { + // CHECK-LABEL: test_mm_multishift_epi64_epi8 + // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + return _mm_multishift_epi64_epi8(__X, __Y); +} + +TEST_CONSTEXPR(match_v16qu( + _mm_multishift_epi64_epi8( + (__m128i)(__v16qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m128i)(__v16qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}), + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18)); + __m128i test_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y) { // CHECK-LABEL: test_mm_mask_multishift_epi64_epi8 // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}} - return _mm_mask_multishift_epi64_epi8(__W, __M, __X, __Y); + return _mm_mask_multishift_epi64_epi8(__W, __M, __X, __Y); } +TEST_CONSTEXPR(match_v16qu( + _mm_mask_multishift_epi64_epi8( + (__m128i)(__v16qu){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + 0xAAAA, + (__m128i)(__v16qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m128i)(__v16qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}), + 0xFF, 0x02, 0xFF, 0x04, 0xFF, 0x06, 0xFF, 0x08, + 0xFF, 0x12, 0xFF, 0x14, 0xFF, 0x16, 0xFF, 0x18)); + 
__m128i test_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y) { // CHECK-LABEL: test_mm_maskz_multishift_epi64_epi8 // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}} - return _mm_maskz_multishift_epi64_epi8(__M, __X, __Y); + return _mm_maskz_multishift_epi64_epi8(__M, __X, __Y); } -__m128i test_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) { - // CHECK-LABEL: test_mm_multishift_epi64_epi8 - // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) - return _mm_multishift_epi64_epi8(__X, __Y); +TEST_CONSTEXPR(match_v16qu( + _mm_maskz_multishift_epi64_epi8( + 0x5555, + (__m128i)(__v16qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m128i)(__v16qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}), + 0x01, 0, 0x03, 0, 0x05, 0, 0x07, 0, + 0x11, 0, 0x13, 0, 0x15, 0, 0x17, 0)); + +__m256i test_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) { + // CHECK-LABEL: test_mm256_multishift_epi64_epi8 + // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}) + return _mm256_multishift_epi64_epi8(__X, __Y); } +TEST_CONSTEXPR(match_v32qu( + _mm256_multishift_epi64_epi8( + (__m256i)(__v32qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m256i)(__v32qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38}), + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38)); + __m256i 
test_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y) { // CHECK-LABEL: test_mm256_mask_multishift_epi64_epi8 // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}) // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}} - return _mm256_mask_multishift_epi64_epi8(__W, __M, __X, __Y); + return _mm256_mask_multishift_epi64_epi8(__W, __M, __X, __Y); } +TEST_CONSTEXPR(match_v32qu( + _mm256_mask_multishift_epi64_epi8( + (__m256i)(__v32qu){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + 0xAAAAAAAA, + (__m256i)(__v32qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m256i)(__v32qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38}), + 0xFF, 0x02, 0xFF, 0x04, 0xFF, 0x06, 0xFF, 0x08, + 0xFF, 0x12, 0xFF, 0x14, 0xFF, 0x16, 0xFF, 0x18, + 0xFF, 0x22, 0xFF, 0x24, 0xFF, 0x26, 0xFF, 0x28, + 0xFF, 0x32, 0xFF, 0x34, 0xFF, 0x36, 0xFF, 0x38)); + __m256i test_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y) { // CHECK-LABEL: test_mm256_maskz_multishift_epi64_epi8 // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}) // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}} - return _mm256_maskz_multishift_epi64_epi8(__M, __X, __Y); + return _mm256_maskz_multishift_epi64_epi8(__M, __X, __Y); } -__m256i test_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) { - // CHECK-LABEL: test_mm256_multishift_epi64_epi8 - // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}) - return 
_mm256_multishift_epi64_epi8(__X, __Y); -} +TEST_CONSTEXPR(match_v32qu( + _mm256_maskz_multishift_epi64_epi8( + 0x55555555, + (__m256i)(__v32qu){0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56, + 0, 8, 16, 24, 32, 40, 48, 56}, + (__m256i)(__v32qu){0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38}), + 0x01, 0, 0x03, 0, 0x05, 0, 0x07, 0, + 0x11, 0, 0x13, 0, 0x15, 0, 0x17, 0, + 0x21, 0, 0x23, 0, 0x25, 0, 0x27, 0, + 0x31, 0, 0x33, 0, 0x35, 0, 0x37, 0)); diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c index 58bb8bef6fb46..5f6d8360888f5 100644 --- a/clang/test/CodeGen/X86/avx512vl-builtins.c +++ b/clang/test/CodeGen/X86/avx512vl-builtins.c @@ -6746,8 +6746,12 @@ __m256i test_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_srl_epi32 // CHECK: @llvm.x86.avx2.psrl.d // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} - return _mm256_maskz_srl_epi32(__U, __A, __B); + return _mm256_maskz_srl_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v4si(_mm_mask_srl_epi32((__m128i)(__v4si){99, 99, 99, 99}, 0x5, (__m128i)(__v4si){-2, 4, -6, 8}, (__m128i)(__v4si){1, 0, 0, 0}), 2147483647, 99, 2147483645, 99)); +TEST_CONSTEXPR(match_v4si(_mm_maskz_srl_epi32(0xA, (__m128i)(__v4si){-2, 4, -6, 8}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 2, 0, 4)); +TEST_CONSTEXPR(match_v8si(_mm256_mask_srl_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m256i)(__v8si){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v4si){1, 0, 0, 0}), 99, 2147483647, 99, 2147483645, 4, 99, 6, 99)); +TEST_CONSTEXPR(match_v8si(_mm256_maskz_srl_epi32(0xA5, (__m256i)(__v8si){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 2, 0, 0, 2147483643, 0, 2147483641)); __m128i 
test_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A) { // CHECK-LABEL: test_mm_mask_srli_epi32 @@ -6831,8 +6835,12 @@ __m256i test_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_srl_epi64 // CHECK: @llvm.x86.avx2.psrl.q // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} - return _mm256_maskz_srl_epi64(__U, __A, __B); + return _mm256_maskz_srl_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v2di(_mm_mask_srl_epi64((__m128i)(__v2di){99, 99}, 0x1, (__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){1, 0}), 9223372036854775807, 99)); +TEST_CONSTEXPR(match_v2di(_mm_maskz_srl_epi64(0x2, (__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){1, 0}), 0, 2)); +TEST_CONSTEXPR(match_v4di(_mm256_mask_srl_epi64((__m256i)(__v4di){99, 99, 99, 99}, 0x5, (__m256i)(__v4di){0, -2, 4, -6}, (__m128i)(__v2di){1, 0}), 0, 99, 2, 99)); +TEST_CONSTEXPR(match_v4di(_mm256_maskz_srl_epi64(0xA, (__m256i)(__v4di){0, -2, 4, -6}, (__m128i)(__v2di){1, 0}), 0, 9223372036854775807, 0, 9223372036854775805)); __m128i test_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A) { // CHECK-LABEL: test_mm_mask_srli_epi64 @@ -6917,8 +6925,12 @@ __m256i test_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_sll_epi32 // CHECK: @llvm.x86.avx2.psll.d // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} - return _mm256_maskz_sll_epi32(__U, __A, __B); + return _mm256_maskz_sll_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v4si(_mm_mask_sll_epi32((__m128i)(__v4si){99, 99, 99, 99}, 0x5, (__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){1, 0, 0, 0}), 2, 99, 6, 99)); +TEST_CONSTEXPR(match_v4si(_mm_maskz_sll_epi32(0xA, (__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 4, 0, 8)); +TEST_CONSTEXPR(match_v8si(_mm256_mask_sll_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 0, 0}), 99, 2, 99, 6, 8, 
99, 12, 99)); +TEST_CONSTEXPR(match_v8si(_mm256_maskz_sll_epi32(0xA5, (__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 4, 0, 0, 10, 0, 14)); __m128i test_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A) { // CHECK-LABEL: test_mm_mask_slli_epi32 @@ -7007,8 +7019,12 @@ __m256i test_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_sll_epi64 // CHECK: @llvm.x86.avx2.psll.q // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} - return _mm256_maskz_sll_epi64(__U, __A, __B); + return _mm256_maskz_sll_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v2di(_mm_mask_sll_epi64((__m128i)(__v2di){99, 99}, 0x1, (__m128i)(__v2di){1, 2}, (__m128i)(__v2di){1, 0}), 2, 99)); +TEST_CONSTEXPR(match_v2di(_mm_maskz_sll_epi64(0x2, (__m128i)(__v2di){1, 2}, (__m128i)(__v2di){1, 0}), 0, 4)); +TEST_CONSTEXPR(match_v4di(_mm256_mask_sll_epi64((__m256i)(__v4di){99, 99, 99, 99}, 0x5, (__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 0}), 0, 99, 4, 99)); +TEST_CONSTEXPR(match_v4di(_mm256_maskz_sll_epi64(0xA, (__m256i)(__v4di){0, 1, 2, 3}, (__m128i)(__v2di){1, 0}), 0, 2, 0, 6)); __m128i test_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A) { // CHECK-LABEL: test_mm_mask_slli_epi64 @@ -8478,8 +8494,12 @@ __m256i test_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_sra_epi32 // CHECK: @llvm.x86.avx2.psra.d // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} - return _mm256_maskz_sra_epi32(__U, __A, __B); + return _mm256_maskz_sra_epi32(__U, __A, __B); } +TEST_CONSTEXPR(match_v4si(_mm_mask_sra_epi32((__m128i)(__v4si){99, 99, 99, 99}, 0x5, (__m128i)(__v4si){-2, 4, -6, 8}, (__m128i)(__v4si){1, 0, 0, 0}), -1, 99, -3, 99)); +TEST_CONSTEXPR(match_v4si(_mm_maskz_sra_epi32(0xA, (__m128i)(__v4si){-2, 4, -6, 8}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 2, 0, 4)); +TEST_CONSTEXPR(match_v8si(_mm256_mask_sra_epi32((__m256i)(__v8si){99, 
99, 99, 99, 99, 99, 99, 99}, 0x5A, (__m256i)(__v8si){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v4si){1, 0, 0, 0}), 99, -1, 99, -3, 4, 99, 6, 99)); +TEST_CONSTEXPR(match_v8si(_mm256_maskz_sra_epi32(0xA5, (__m256i)(__v8si){0, -2, 4, -6, 8, -10, 12, -14}, (__m128i)(__v4si){1, 0, 0, 0}), 0, 0, 2, 0, 0, -5, 0, -7)); __m128i test_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A) { // CHECK-LABEL: test_mm_mask_srai_epi32 @@ -8542,8 +8562,10 @@ __m256i test_mm256_maskz_srai_epi32_2(__mmask8 __U, __m256i __A, unsigned int __ __m128i test_mm_sra_epi64(__m128i __A, __m128i __B) { // CHECK-LABEL: test_mm_sra_epi64 // CHECK: @llvm.x86.avx512.psra.q.128 - return _mm_sra_epi64(__A, __B); + return _mm_sra_epi64(__A, __B); } +TEST_CONSTEXPR(match_v2di(_mm_sra_epi64((__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){1, 0}), -1, 2)); +TEST_CONSTEXPR(match_v2di(_mm_sra_epi64((__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){64, 0}), -1, 0)); __m128i test_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { // CHECK-LABEL: test_mm_mask_sra_epi64 @@ -8576,8 +8598,13 @@ __m256i test_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) { // CHECK-LABEL: test_mm256_maskz_sra_epi64 // CHECK: @llvm.x86.avx512.psra.q.256 // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} - return _mm256_maskz_sra_epi64(__U, __A, __B); + return _mm256_maskz_sra_epi64(__U, __A, __B); } +TEST_CONSTEXPR(match_v2di(_mm_mask_sra_epi64((__m128i)(__v2di){99, 99}, 0x1, (__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){1, 0}), -1, 99)); +TEST_CONSTEXPR(match_v2di(_mm_maskz_sra_epi64(0x2, (__m128i)(__v2di){-2, 4}, (__m128i)(__v2di){1, 0}), 0, 2)); +TEST_CONSTEXPR(match_v4di(_mm256_sra_epi64((__m256i)(__v4di){-2, 4, -6, 8}, (__m128i)(__v2di){1, 0}), -1, 2, -3, 4)); +TEST_CONSTEXPR(match_v4di(_mm256_mask_sra_epi64((__m256i)(__v4di){99, 99, 99, 99}, 0x5, (__m256i)(__v4di){0, -2, 4, -6}, (__m128i)(__v2di){1, 0}), 0, 99, 2, 99)); +TEST_CONSTEXPR(match_v4di(_mm256_maskz_sra_epi64(0xA, 
(__m256i)(__v4di){0, -2, 4, -6}, (__m128i)(__v2di){1, 0}), 0, -1, 0, -3)); __m128i test_mm_srai_epi64(__m128i __A) { // CHECK-LABEL: test_mm_srai_epi64 @@ -9078,6 +9105,7 @@ __m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) { // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> return _mm256_shuffle_f32x4(__A, __B, 3); } +TEST_CONSTEXPR(match_m256(_mm256_shuffle_f32x4(((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 5.0, 6.0, 7.0, 8.0, 10.0, 20.0, 30.0, 40.0)); __m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: test_mm256_mask_shuffle_f32x4 @@ -9085,6 +9113,7 @@ __m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m25 // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256(_mm256_mask_shuffle_f32x4(((__m256){100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0}), 0b10101010, ((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 100.0, 6.0, 300.0, 8.0, 500.0, 20.0, 700.0, 40.0)); __m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_f32x4 @@ -9092,12 +9121,14 @@ __m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256(_mm256_maskz_shuffle_f32x4(0b01010101, ((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 5.0, 0.0, 7.0, 0.0, 10.0, 0.0, 30.0, 0.0)); __m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_shuffle_f64x2 // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> 
return _mm256_shuffle_f64x2(__A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_shuffle_f64x2(((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 30.0, 40.0)); __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_mask_shuffle_f64x2 @@ -9106,6 +9137,7 @@ __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_shuffle_f64x2(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_mask_shuffle_f64x2(((__m256d){100.0, 200.0, 300.0, 400.0}), 0b00001111, ((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 30.0, 40.0)); __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_f64x2 @@ -9114,12 +9146,14 @@ __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_shuffle_f64x2(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_maskz_shuffle_f64x2(0b00001011, ((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 0.0, 40.0)); __m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_shuffle_i32x4 // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> return _mm256_shuffle_i32x4(__A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_shuffle_i32x4(((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 1, 2, 3, 4, 10, 20, 30, 40)); __m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_mask_shuffle_i32x4 @@ -9127,6 +9161,7 @@ __m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, 
<8 x i32> %{{.*}} return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_mask_shuffle_i32x4(((__m256i)(__v8si){100, 200, 300, 400, 500, 600, 700, 800}), 0b00000000, ((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 100, 200, 300, 400, 500, 600, 700, 800)); __m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_i32x4 @@ -9134,12 +9169,14 @@ __m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_maskz_shuffle_i32x4(0b11111111, ((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 1, 2, 3, 4, 10, 20, 30, 40)); __m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_shuffle_i64x2 // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> return _mm256_shuffle_i64x2(__A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_shuffle_i64x2(((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 1ULL, 2ULL, 30ULL, 40ULL)); __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_mask_shuffle_i64x2 @@ -9148,6 +9185,7 @@ __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} return _mm256_mask_shuffle_i64x2(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_mask_shuffle_i64x2(((__m256i){100ULL, 200ULL, 300ULL, 400ULL}), 0b00001101, ((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 1ULL, 200ULL, 30ULL, 40ULL)); __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: 
test_mm256_maskz_shuffle_i64x2 @@ -9156,6 +9194,7 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} return _mm256_maskz_shuffle_i64x2(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_maskz_shuffle_i64x2( 0b00000110, ((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 0ULL, 2ULL, 30ULL, 0ULL)); __m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: test_mm_mask_shuffle_pd diff --git a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c index d59b254520774..a5adae0cbb935 100644 --- a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c @@ -9,6 +9,7 @@ // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s #include +#include "builtin_test_helpers.h" __m128bh test_mm_cvtne2ps2bf16(__m128 A, __m128 B) { // CHECK-LABEL: test_mm_cvtne2ps2bf16 @@ -156,46 +157,49 @@ __bf16 test_mm_cvtness_sbh(float A) { __m128 test_mm_cvtpbh_ps(__m128bh A) { // CHECK-LABEL: test_mm_cvtpbh_ps - // CHECK: sext <4 x i16> %{{.*}} to <4 x i32> - // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: fpext <8 x bfloat> %{{.*}} to <8 x float> + // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> return _mm_cvtpbh_ps(A); } +TEST_CONSTEXPR(match_m128(_mm_cvtpbh_ps((__m128bh){-8.0f, 16.0f, -32.0f, 64.0f, -0.0f, 1.0f, -2.0f, 4.0f}), -8.0f, 16.0f, -32.0f, 64.0f)); __m256 test_mm256_cvtpbh_ps(__m128bh A) { // CHECK-LABEL: test_mm256_cvtpbh_ps - // CHECK: sext <8 x i16> %{{.*}} to <8 x i32> - // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: 
fpext <8 x bfloat> %{{.*}} to <8 x float> return _mm256_cvtpbh_ps(A); } +TEST_CONSTEXPR(match_m256(_mm256_cvtpbh_ps((__m128bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f}), -0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f)); __m128 test_mm_maskz_cvtpbh_ps(__mmask8 M, __m128bh A) { // CHECK-LABEL: test_mm_maskz_cvtpbh_ps - // CHECK: sext <4 x i16> %{{.*}} to <4 x i32> - // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}} - // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: fpext <8 x bfloat> %{{.*}} to <8 x float> + // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_cvtpbh_ps(M, A); } +TEST_CONSTEXPR(match_m128(_mm_maskz_cvtpbh_ps(0x01, (__m128bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f}), -0.0f, 0.0f, 0.0f, 0.0f)); __m256 test_mm256_maskz_cvtpbh_ps(__mmask8 M, __m128bh A) { // CHECK-LABEL: test_mm256_maskz_cvtpbh_ps - // CHECK: sext <8 x i16> %{{.*}} to <8 x i32> - // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} - // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}}) + // CHECK: fpext <8 x bfloat> %{{.*}} to <8 x float> + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_cvtpbh_ps(M, A); } +TEST_CONSTEXPR(match_m256(_mm256_maskz_cvtpbh_ps(0x73, (__m128bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f}), -0.0f, 1.0f, 0.0f, 0.0f, -8.0f, 16.0f, -32.0f, 0.0f)); __m128 test_mm_mask_cvtpbh_ps(__m128 S, __mmask8 M, __m128bh A) { // CHECK-LABEL: test_mm_mask_cvtpbh_ps - // CHECK: sext <4 x i16> %{{.*}} to <4 x i32> - // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}}) - // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}} + // CHECK: fpext <8 x bfloat> %{{.*}} to <8 x float> + // CHECK: shufflevector <8 x float> 
%{{.*}}, <8 x float> %{{.*}}, <4 x i32> + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_cvtpbh_ps(S, M, A); } +TEST_CONSTEXPR(match_m128(_mm_mask_cvtpbh_ps((__m128){ 99.0f, 99.0f, 99.0f, 99.0f }, 0x03, (__m128bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f}), -0.0f, 1.0f, 99.0f, 99.0f)); __m256 test_mm256_mask_cvtpbh_ps(__m256 S, __mmask8 M, __m128bh A) { // CHECK-LABEL: test_mm256_mask_cvtpbh_ps - // CHECK: sext <8 x i16> %{{.*}} to <8 x i32> - // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}}) - // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} + // CHECK: fpext <8 x bfloat> %{{.*}} to <8 x float> + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_cvtpbh_ps(S, M, A); } +TEST_CONSTEXPR(match_m256(_mm256_mask_cvtpbh_ps((__m256){ 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f, 99.0f }, 0x37, (__m128bh){-0.0f, 1.0f, -2.0f, 4.0f, -8.0f, 16.0f, -32.0f, 64.0f}), -0.0f, 1.0f, -2.0f, 99.0f, -8.0f, 16.0f, 99.0f, 99.0f)); diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c index 16e113031bfda..f6f27d9c3da3d 100644 --- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c @@ -475,6 +475,7 @@ __mmask8 test_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) { // CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}} return (__mmask8)_mm_cmplt_epu16_mask(__a, __b); } +TEST_CONSTEXPR(_mm_cmplt_epu16_mask(((__m128i)(__v8hu){12351, 47995, 11802, 16970, 16956, 13965, 33529, 18928}), ((__m128i)(__v8hu){48792, 59915, 50576, 62643, 3758, 16415, 7966, 39475})) == (__mmask8)0xAF); __mmask8 test_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { // CHECK-LABEL: test_mm_mask_cmplt_epu16_mask diff --git a/clang/test/CodeGen/X86/f16c-builtins.c b/clang/test/CodeGen/X86/f16c-builtins.c index c08ef76d56981..2ae4bc857b431 100755 --- 
a/clang/test/CodeGen/X86/f16c-builtins.c +++ b/clang/test/CodeGen/X86/f16c-builtins.c @@ -46,6 +46,31 @@ __m128 test_mm_cvtph_ps(__m128i a) { return _mm_cvtph_ps(a); } +// A value exactly halfway between 1.0 and the next representable FP16 number. +// In binary, its significand ends in ...000, followed by a tie-bit 1. +#define POS_HALFWAY (1.0f + 0.00048828125f) // 1.0 + 2^-11, a tie-breaking case + +// +// _mm_cvtps_ph (128-bit, 4 floats -> 8 shorts, 4 are zero-padded) +// +// Test values: -2.5f, 1.123f, POS_HALFWAY +TEST_CONSTEXPR(match_v8hi( + _mm_cvtps_ph(_mm_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_NEAREST_INT), + 0xC100, 0x3C7E, 0x3C00, 0x0000, 0, 0, 0, 0 +)); +TEST_CONSTEXPR(match_v8hi( + _mm_cvtps_ph(_mm_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_NEG_INF), + 0xC100, 0x3C7D, 0x3C00, 0x0000, 0, 0, 0, 0 +)); +TEST_CONSTEXPR(match_v8hi( + _mm_cvtps_ph(_mm_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_POS_INF), + 0xC100, 0x3C7E, 0x3C01, 0x0000, 0, 0, 0, 0 +)); +TEST_CONSTEXPR(match_v8hi( + _mm_cvtps_ph(_mm_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_ZERO), + 0xC100, 0x3C7D, 0x3C00, 0x0000, 0, 0, 0, 0 +)); + __m256 test_mm256_cvtph_ps(__m128i a) { // CHECK-LABEL: test_mm256_cvtph_ps // CHECK: fpext <8 x half> %{{.*}} to <8 x float> @@ -56,12 +81,44 @@ TEST_CONSTEXPR(match_m256( 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 0.5f, -2.0f, 0.0f )); +// +// _mm256_cvtps_ph (256-bit, 8 floats -> 8 shorts) +// +// Test values: -2.5f, 1.123f, POS_HALFWAY +TEST_CONSTEXPR(match_v8hi( + _mm256_cvtps_ph(_mm256_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f, -2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_NEAREST_INT), + 0xC100, 0x3C7E, 0x3C00, 0x0000, 0xC100, 0x3C7E, 0x3C00, 0x0000 +)); +TEST_CONSTEXPR(match_v8hi( + _mm256_cvtps_ph(_mm256_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f, -2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_NEG_INF), + 0xC100, 0x3C7D, 0x3C00, 0x0000, 0xC100, 0x3C7D, 0x3C00, 0x0000 +)); +TEST_CONSTEXPR(match_v8hi( + 
_mm256_cvtps_ph(_mm256_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f, -2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_POS_INF), + 0xC100, 0x3C7E, 0x3C01, 0x0000, 0xC100, 0x3C7E, 0x3C01, 0x0000 +)); +TEST_CONSTEXPR(match_v8hi( + _mm256_cvtps_ph(_mm256_setr_ps(-2.5f, 1.123f, POS_HALFWAY, 0.0f, -2.5f, 1.123f, POS_HALFWAY, 0.0f), _MM_FROUND_TO_ZERO), + 0xC100, 0x3C7D, 0x3C00, 0x0000, 0xC100, 0x3C7D, 0x3C00, 0x0000 +)); + __m128i test_mm_cvtps_ph(__m128 a) { // CHECK-LABEL: test_mm_cvtps_ph // CHECK: call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %{{.*}}, i32 0) return _mm_cvtps_ph(a, 0); } +// +// Tests for Exact Dynamic Rounding +// +// Test that dynamic rounding SUCCEEDS for exactly representable values. +// We use _MM_FROUND_CUR_DIRECTION (value 4) to specify dynamic rounding. +// Inputs: -2.5f, 0.125f, -16.0f are all exactly representable in FP16. +TEST_CONSTEXPR(match_v8hi( + __builtin_ia32_vcvtps2ph256(_mm256_setr_ps(-2.5f, 0.125f, -16.0f, 0.0f, -2.5f, 0.125f, -16.0f, 0.0f), _MM_FROUND_CUR_DIRECTION), + 0xC100, 0x3000, 0xCC00, 0x0000, 0xC100, 0x3000, 0xCC00, 0x0000 +)); + __m128i test_mm256_cvtps_ph(__m256 a) { // CHECK-LABEL: test_mm256_cvtps_ph // CHECK: call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %{{.*}}, i32 0) diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c index ad8a81c61ad43..37d6306ecdb7d 100644 --- a/clang/test/CodeGen/X86/mmx-builtins.c +++ b/clang/test/CodeGen/X86/mmx-builtins.c @@ -632,18 +632,24 @@ __m64 test_mm_sll_pi16(__m64 a, __m64 b) { // CHECK: call <8 x i16> @llvm.x86.sse2.psll.w( return _mm_sll_pi16(a, b); } +TEST_CONSTEXPR(match_v4hi(_mm_sll_pi16((__m64)(__v4hi){1, 2, 3, 4}, (__m64)(__v4hi){1, 0, 0, 0}), 2, 4, 6, 8)); +TEST_CONSTEXPR(match_v4hi(_mm_sll_pi16((__m64)(__v4hi){1, 2, 3, 4}, (__m64)(__v4hi){16, 0, 0, 0}), 0, 0, 0, 0)); __m64 test_mm_sll_pi32(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_sll_pi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psll.d( return _mm_sll_pi32(a, b); } 
+TEST_CONSTEXPR(match_v2si(_mm_sll_pi32((__m64)(__v2si){1, 2}, (__m64)(__v2si){1, 0}), 2, 4)); +TEST_CONSTEXPR(match_v2si(_mm_sll_pi32((__m64)(__v2si){1, 2}, (__m64)(__v2si){32, 0}), 0, 0)); __m64 test_mm_sll_si64(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_sll_si64 // CHECK: call <2 x i64> @llvm.x86.sse2.psll.q( return _mm_sll_si64(a, b); } +TEST_CONSTEXPR(match_v1di(_mm_sll_si64((__m64)(__v1di){1}, (__m64)(__v1di){1}), 2)); +TEST_CONSTEXPR(match_v1di(_mm_sll_si64((__m64)(__v1di){1}, (__m64)(__v1di){64}), 0)); __m64 test_mm_slli_pi16(__m64 a) { // CHECK-LABEL: test_mm_slli_pi16 @@ -685,12 +691,16 @@ __m64 test_mm_sra_pi16(__m64 a, __m64 b) { // CHECK: call <8 x i16> @llvm.x86.sse2.psra.w( return _mm_sra_pi16(a, b); } +TEST_CONSTEXPR(match_v4hi(_mm_sra_pi16((__m64)(__v4hi){-16, 16, -8, 8}, (__m64)(__v4hi){1, 0, 0, 0}), -8, 8, -4, 4)); +TEST_CONSTEXPR(match_v4hi(_mm_sra_pi16((__m64)(__v4hi){-16, 16, -8, 8}, (__m64)(__v4hi){16, 0, 0, 0}), -1, 0, -1, 0)); __m64 test_mm_sra_pi32(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_sra_pi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psra.d( return _mm_sra_pi32(a, b); } +TEST_CONSTEXPR(match_v2si(_mm_sra_pi32((__m64)(__v2si){-16, 16}, (__m64)(__v2si){1, 0}), -8, 8)); +TEST_CONSTEXPR(match_v2si(_mm_sra_pi32((__m64)(__v2si){-16, 16}, (__m64)(__v2si){32, 0}), -1, 0)); __m64 test_mm_srai_pi16(__m64 a) { // CHECK-LABEL: test_mm_srai_pi16 @@ -722,18 +732,24 @@ __m64 test_mm_srl_pi16(__m64 a, __m64 b) { // CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w( return _mm_srl_pi16(a, b); } +TEST_CONSTEXPR(match_v4hu(_mm_srl_pi16((__m64)(__v4hu){0x8000, 16, 8, 4}, (__m64)(__v4hi){1, 0, 0, 0}), 0x4000, 8, 4, 2)); +TEST_CONSTEXPR(match_v4hi(_mm_srl_pi16((__m64)(__v4hi){-1, 16, 8, 4}, (__m64)(__v4hi){16, 0, 0, 0}), 0, 0, 0, 0)); __m64 test_mm_srl_pi32(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_srl_pi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psrl.d( return _mm_srl_pi32(a, b); } +TEST_CONSTEXPR(match_v2su(_mm_srl_pi32((__m64)(__v2su){0x80000000, 16}, 
(__m64)(__v2si){1, 0}), 0x40000000, 8)); +TEST_CONSTEXPR(match_v2si(_mm_srl_pi32((__m64)(__v2si){-1, 16}, (__m64)(__v2si){32, 0}), 0, 0)); __m64 test_mm_srl_si64(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_srl_si64 // CHECK: call <2 x i64> @llvm.x86.sse2.psrl.q( return _mm_srl_si64(a, b); } +TEST_CONSTEXPR(match_v1du(_mm_srl_si64((__m64)(__v1du){0x8000000000000000ULL}, (__m64)(__v1di){1}), 0x4000000000000000ULL)); +TEST_CONSTEXPR(match_v1di(_mm_srl_si64((__m64)(__v1di){-1}, (__m64)(__v1di){64}), 0)); __m64 test_mm_srli_pi16(__m64 a) { // CHECK-LABEL: test_mm_srli_pi16 diff --git a/clang/test/CodeGen/X86/sse-builtins-constrained.c b/clang/test/CodeGen/X86/sse-builtins-constrained.c index 92240bbc5bb31..f3b8d20944bd4 100644 --- a/clang/test/CodeGen/X86/sse-builtins-constrained.c +++ b/clang/test/CodeGen/X86/sse-builtins-constrained.c @@ -28,11 +28,10 @@ __m128 test_mm_sqrt_ps(__m128 x) { __m128 test_sqrt_ss(__m128 x) { // COMMON-LABEL: test_sqrt_ss - // COMMONIR: extractelement <4 x float> {{.*}}, i64 0 + // COMMONIR: extractelement <4 x float> {{.*}}, i32 0 // UNCONSTRAINED: call float @llvm.sqrt.f32(float {{.*}}) // CONSTRAINED: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !{{.*}}) // CHECK-ASM: sqrtss - // COMMONIR: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0 + // COMMONIR: insertelement <4 x float> {{.*}}, float {{.*}}, i32 0 return _mm_sqrt_ss(x); } - diff --git a/clang/test/CodeGen/X86/sse-builtins.c b/clang/test/CodeGen/X86/sse-builtins.c index 6c5297e45dc82..fd4775739fad8 100644 --- a/clang/test/CodeGen/X86/sse-builtins.c +++ b/clang/test/CodeGen/X86/sse-builtins.c @@ -751,9 +751,9 @@ __m128 test_mm_sqrt_ps(__m128 x) { __m128 test_mm_sqrt_ss(__m128 x) { // CHECK-LABEL: test_mm_sqrt_ss - // CHECK: extractelement <4 x float> {{.*}}, i64 0 + // CHECK: extractelement <4 x float> {{.*}}, i32 0 // CHECK: call float @llvm.sqrt.f32(float {{.*}}) - // CHECK: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0 + // CHECK: 
insertelement <4 x float> {{.*}}, float {{.*}}, i32 0 return _mm_sqrt_ss(x); } diff --git a/clang/test/CodeGen/X86/sse2-builtins-constrained.c b/clang/test/CodeGen/X86/sse2-builtins-constrained.c index 587fd3aa7c92f..a4a0829720501 100644 --- a/clang/test/CodeGen/X86/sse2-builtins-constrained.c +++ b/clang/test/CodeGen/X86/sse2-builtins-constrained.c @@ -28,11 +28,10 @@ __m128d test_mm_sqrt_pd(__m128d x) { __m128d test_sqrt_sd(__m128d x, __m128d y) { // COMMON-LABEL: test_sqrt_sd - // COMMONIR: extractelement <2 x double> {{.*}}, i64 0 + // COMMONIR: extractelement <2 x double> {{.*}}, i32 0 // UNCONSTRAINED: call double @llvm.sqrt.f64(double {{.*}}) // CONSTRAINED: call double @llvm.experimental.constrained.sqrt.f64(double {{.*}}, metadata !{{.*}}) // CHECK-ASM: sqrtsd - // COMMONIR: insertelement <2 x double> {{.*}}, double {{.*}}, i64 0 + // COMMONIR: insertelement <2 x double> {{.*}}, double {{.*}}, i32 0 return _mm_sqrt_sd(x, y); } - diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c index 379ae48995d26..ed1ac84b8c4a3 100644 --- a/clang/test/CodeGen/X86/sse2-builtins.c +++ b/clang/test/CodeGen/X86/sse2-builtins.c @@ -1336,18 +1336,31 @@ __m128i test_mm_sll_epi16(__m128i A, __m128i B) { // CHECK: call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm_sll_epi16(A, B); } +TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 2, 4, 6, 8, 10, 12, 14, 16)); +TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); 
+TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sll_epi16((__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 2, 4, 6, 8, 10, 12, 14, 16)); __m128i test_mm_sll_epi32(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_sll_epi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm_sll_epi32(A, B); } +TEST_CONSTEXPR(match_v4si(_mm_sll_epi32((__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){1, 0, 0, 0}), 2, 4, 6, 8)); +TEST_CONSTEXPR(match_v4si(_mm_sll_epi32((__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4si(_mm_sll_epi32((__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4si(_mm_sll_epi32((__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v4si){1, 0, 1, 1}), 2, 4, 6, 8)); __m128i test_mm_sll_epi64(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_sll_epi64 // CHECK: call {{.*}}<2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) return _mm_sll_epi64(A, B); } +TEST_CONSTEXPR(match_v2di(_mm_sll_epi64((__m128i)(__v2di){1, 2}, (__m128i)(__v2di){1, 0}), 2, 4)); +TEST_CONSTEXPR(match_v2di(_mm_sll_epi64((__m128i)(__v2di){1, 2}, (__m128i)(__v2di){64, 0}), 0, 0)); +TEST_CONSTEXPR(match_v2di(_mm_sll_epi64((__m128i)(__v2di){1, 2}, (__m128i)(__v2di){1, 1}), 2, 4)); __m128i test_mm_slli_epi16(__m128i A) { // CHECK-LABEL: test_mm_slli_epi16 @@ -1440,9 +1453,10 @@ __m128d test_mm_sqrt_pd(__m128d A) { __m128d test_mm_sqrt_sd(__m128d A, __m128d B) { // CHECK-LABEL: test_mm_sqrt_sd - // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i32 0 // CHECK: call double @llvm.sqrt.f64(double {{.*}}) - // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0 + // CHECK: %[[sqrt_vec:.*]] = insertelement <2 x double> 
poison, double %{{.*}}, i32 0 + // CHECK: insertelement <2 x double> %[[sqrt_vec]], double %{{.*}}, i32 1 return _mm_sqrt_sd(A, B); } @@ -1451,12 +1465,22 @@ __m128i test_mm_sra_epi16(__m128i A, __m128i B) { // CHECK: call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm_sra_epi16(A, B); } +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), -8, 8, -4, 4, -2, 2, -1, 1)); +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), -1, 0, -1, 0, -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), -1, 0, -1, 0, -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), -1, 0, -1, 0, -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), -1, 0, -1, 0, -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_sra_epi16((__m128i)(__v8hi){-16, 16, -8, 8, -4, 4, -2, 2}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), -8, 8, -4, 4, -2, 2, -1, 1)); __m128i test_mm_sra_epi32(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_sra_epi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm_sra_epi32(A, B); } +TEST_CONSTEXPR(match_v4si(_mm_sra_epi32((__m128i)(__v4si){-16, 16, -8, 8}, (__m128i)(__v4si){1, 0, 0, 0}), -8, 8, -4, 4)); +TEST_CONSTEXPR(match_v4si(_mm_sra_epi32((__m128i)(__v4si){-16, 16, -8, 8}, (__m128i)(__v4si){32, 0, 0, 0}), -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v4si(_mm_sra_epi32((__m128i)(__v4si){-16, 16, -8, 8}, (__m128i)(__v4si){0, 1, 0, 0}), -1, 0, -1, 0)); +TEST_CONSTEXPR(match_v4si(_mm_sra_epi32((__m128i)(__v4si){-16, 16, -8, 8}, (__m128i)(__v4si){1, 0, 1, 1}), -8, 8, -4, 
4)); __m128i test_mm_srai_epi16(__m128i A) { // CHECK-LABEL: test_mm_srai_epi16 @@ -1502,18 +1526,31 @@ __m128i test_mm_srl_epi16(__m128i A, __m128i B) { // CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm_srl_epi16(A, B); } +TEST_CONSTEXPR(match_v8hu(_mm_srl_epi16((__m128i)(__v8hu){0x8000, 16, 8, 4, 2, 1, 0, 0xFFFF}, (__m128i)(__v8hi){1, 0, 0, 0, 0, 0, 0, 0}), 0x4000, 8, 4, 2, 1, 0, 0, 0x7FFF)); +TEST_CONSTEXPR(match_v8hi(_mm_srl_epi16((__m128i)(__v8hi){-1, 16, 8, 4, 2, 1, 0, -1}, (__m128i)(__v8hi){16, 0, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_srl_epi16((__m128i)(__v8hi){-1, 16, 8, 4, 2, 1, 0, -1}, (__m128i)(__v8hi){0, 1, 0, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_srl_epi16((__m128i)(__v8hi){-1, 16, 8, 4, 2, 1, 0, -1}, (__m128i)(__v8hi){0, 0, 1, 0, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hi(_mm_srl_epi16((__m128i)(__v8hi){-1, 16, 8, 4, 2, 1, 0, -1}, (__m128i)(__v8hi){0, 0, 0, 1, 0, 0, 0, 0}), 0, 0, 0, 0, 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v8hu(_mm_srl_epi16((__m128i)(__v8hu){0x8000, 16, 8, 4, 2, 1, 0, 0xFFFF}, (__m128i)(__v8hi){1, 0, 0, 0, 1, 1, 1, 1}), 0x4000, 8, 4, 2, 1, 0, 0, 0x7FFF)); __m128i test_mm_srl_epi32(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_srl_epi32 // CHECK: call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm_srl_epi32(A, B); } +TEST_CONSTEXPR(match_v4su(_mm_srl_epi32((__m128i)(__v4su){0x80000000, 16, 8, 4}, (__m128i)(__v4si){1, 0, 0, 0}), 0x40000000, 8, 4, 2)); +TEST_CONSTEXPR(match_v4si(_mm_srl_epi32((__m128i)(__v4si){-1, 16, 8, 4}, (__m128i)(__v4si){32, 0, 0, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4si(_mm_srl_epi32((__m128i)(__v4si){-1, 16, 8, 4}, (__m128i)(__v4si){0, 1, 0, 0}), 0, 0, 0, 0)); +TEST_CONSTEXPR(match_v4su(_mm_srl_epi32((__m128i)(__v4su){0x80000000, 16, 8, 4}, (__m128i)(__v4si){1, 0, 1, 1}), 0x40000000, 8, 4, 2)); __m128i test_mm_srl_epi64(__m128i A, 
__m128i B) { // CHECK-LABEL: test_mm_srl_epi64 // CHECK: call {{.*}}<2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) return _mm_srl_epi64(A, B); } +TEST_CONSTEXPR(match_v2du(_mm_srl_epi64((__m128i)(__v2du){0x8000000000000000ULL, 16}, (__m128i)(__v2di){1, 0}), 0x4000000000000000ULL, 8)); +TEST_CONSTEXPR(match_v2di(_mm_srl_epi64((__m128i)(__v2di){-1, 16}, (__m128i)(__v2di){64, 0}), 0, 0)); +TEST_CONSTEXPR(match_v2du(_mm_srl_epi64((__m128i)(__v2du){0x8000000000000000ULL, 16}, (__m128i)(__v2di){1, 1}), 0x4000000000000000ULL, 8)); __m128i test_mm_srli_epi16(__m128i A) { // CHECK-LABEL: test_mm_srli_epi16 diff --git a/clang/test/CodeGen/builtins-x86.c b/clang/test/CodeGen/builtins-x86.c index 31f309791c9f7..a1e63d59e88e1 100644 --- a/clang/test/CodeGen/builtins-x86.c +++ b/clang/test/CodeGen/builtins-x86.c @@ -282,8 +282,6 @@ void f0(void) { tmp_V4f = __builtin_ia32_rcpss(tmp_V4f); tmp_V4f = __builtin_ia32_rsqrtps(tmp_V4f); tmp_V4f = __builtin_ia32_rsqrtss(tmp_V4f); - tmp_V4f = __builtin_ia32_sqrtps(tmp_V4f); - tmp_V4f = __builtin_ia32_sqrtss(tmp_V4f); (void) __builtin_ia32_maskmovdqu(tmp_V16c, tmp_V16c, tmp_cp); tmp_i = __builtin_ia32_movmskpd(tmp_V2d); tmp_i = __builtin_ia32_pmovmskb128(tmp_V16c); @@ -292,8 +290,6 @@ void f0(void) { (void) __builtin_ia32_movnti64(tmp_LLip, tmp_LLi); #endif tmp_V2LLi = __builtin_ia32_psadbw128(tmp_V16c, tmp_V16c); - tmp_V2d = __builtin_ia32_sqrtpd(tmp_V2d); - tmp_V2d = __builtin_ia32_sqrtsd(tmp_V2d); tmp_V2LLi = __builtin_ia32_cvtpd2dq(tmp_V2d); tmp_V4f = __builtin_ia32_cvtpd2ps(tmp_V2d); tmp_V4i = __builtin_ia32_cvttpd2dq(tmp_V2d); @@ -400,8 +396,6 @@ void f0(void) { tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7); tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7); tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7); - tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d); - tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f); tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f); tmp_V8f = 
__builtin_ia32_rcpps256(tmp_V8f); tmp_V4d = __builtin_ia32_roundpd256(tmp_V4d, 0x1); diff --git a/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c b/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c index 117672a9d4368..2c44842f9d28e 100644 --- a/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c +++ b/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c @@ -9,6 +9,11 @@ // RUN: %clang_cc1 -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fsanitize-recover=cfi-icall -fsanitize-minimal-runtime -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables -emit-llvm -o - %s | FileCheck --check-prefix=RECOVER_MIN %s +// RUN: %clang_cc1 -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fsanitize-recover=cfi-icall -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables -emit-llvm -o - %s | FileCheck --check-prefix=PRESERVE_MIN %s + +// RUN: %clang_cc1 -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables -emit-llvm -o - %s | FileCheck --check-prefix=ABORT_MIN %s + + // TRAP-LABEL: define hidden void @f( // TRAP-SAME: ) #[[ATTR0:[0-9]+]] !type [[META6:![0-9]+]] !type [[META7:![0-9]+]] { // TRAP-NEXT: [[ENTRY:.*:]] @@ -34,6 +39,11 @@ // RECOVER_MIN-NEXT: [[ENTRY:.*:]] // RECOVER_MIN-NEXT: ret void // +// PRESERVE_MIN-LABEL: define hidden void @f( +// PRESERVE_MIN-SAME: ) #[[ATTR0:[0-9]+]] !type [[META6:![0-9]+]] !type [[META7:![0-9]+]] { +// PRESERVE_MIN-NEXT: [[ENTRY:.*:]] +// PRESERVE_MIN-NEXT: ret void +// void f() { } @@ -146,6 +156,27 @@ void xf(); // RECOVER_MIN-NEXT: call void (...) 
[[TMP2]]() // RECOVER_MIN-NEXT: ret void // +// PRESERVE_MIN-LABEL: define hidden void @g( +// PRESERVE_MIN-SAME: i32 noundef [[B:%.*]]) #[[ATTR0]] !type [[META8:![0-9]+]] !type [[META9:![0-9]+]] { +// PRESERVE_MIN-NEXT: [[ENTRY:.*:]] +// PRESERVE_MIN-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// PRESERVE_MIN-NEXT: [[FP:%.*]] = alloca ptr, align 8 +// PRESERVE_MIN-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4 +// PRESERVE_MIN-NEXT: [[TMP0:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// PRESERVE_MIN-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 +// PRESERVE_MIN-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL]] to i64 +// PRESERVE_MIN-NEXT: [[COND:%.*]] = select i1 [[TOBOOL]], ptr @f, ptr @xf +// PRESERVE_MIN-NEXT: store ptr [[COND]], ptr [[FP]], align 8 +// PRESERVE_MIN-NEXT: [[TMP2:%.*]] = load ptr, ptr [[FP]], align 8 +// PRESERVE_MIN-NEXT: [[TMP3:%.*]] = call i1 @llvm.type.test(ptr [[TMP2]], metadata !"_ZTSFvE"), !nosanitize [[META10:![0-9]+]] +// PRESERVE_MIN-NEXT: br i1 [[TMP3]], label %[[CONT:.*]], label %[[HANDLER_CFI_CHECK_FAIL:.*]], !prof [[PROF11:![0-9]+]], !nosanitize [[META10]] +// PRESERVE_MIN: [[HANDLER_CFI_CHECK_FAIL]]: +// PRESERVE_MIN-NEXT: call preserve_allcc void @__ubsan_handle_cfi_check_fail_minimal_preserve() #[[ATTR4:[0-9]+]], !nosanitize [[META10]] +// PRESERVE_MIN-NEXT: br label %[[CONT]], !nosanitize [[META10]] +// PRESERVE_MIN: [[CONT]]: +// PRESERVE_MIN-NEXT: call void (...) [[TMP2]]() +// PRESERVE_MIN-NEXT: ret void +// void g(int b) { void (*fp)() = b ? f : xf; fp(); @@ -186,3 +217,10 @@ void g(int b) { // RECOVER_MIN: [[META10]] = !{} // RECOVER_MIN: [[PROF11]] = !{!"branch_weights", i32 1048575, i32 1} //. +// PRESERVE_MIN: [[META6]] = !{i64 0, !"_ZTSFvE"} +// PRESERVE_MIN: [[META7]] = !{i64 0, !"_ZTSFvE.generalized"} +// PRESERVE_MIN: [[META8]] = !{i64 0, !"_ZTSFviE"} +// PRESERVE_MIN: [[META9]] = !{i64 0, !"_ZTSFviE.generalized"} +// PRESERVE_MIN: [[META10]] = !{} +// PRESERVE_MIN: [[PROF11]] = !{!"branch_weights", i32 1048575, i32 1} +//. 
diff --git a/clang/test/CodeGen/scoped-atomic-ops.c b/clang/test/CodeGen/scoped-atomic-ops.c index c39048120a457..b6dae3fa5f569 100644 --- a/clang/test/CodeGen/scoped-atomic-ops.c +++ b/clang/test/CodeGen/scoped-atomic-ops.c @@ -4539,6 +4539,111 @@ _Bool fi7e(_Bool *c) { return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE); } + +// AMDGCN_CL_DEF-LABEL: define hidden void @fi8a( +// AMDGCN_CL_DEF-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] { +// AMDGCN_CL_DEF-NEXT: [[ENTRY:.*:]] +// AMDGCN_CL_DEF-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_DEF-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr +// AMDGCN_CL_DEF-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr +// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr +// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr +// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr +// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr +// AMDGCN_CL_DEF-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: store i32 -1, ptr [[DOTATOMICTMP_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[DOTATOMICTMP_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]] +// AMDGCN_CL_DEF-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: store i32 -1, ptr [[DOTATOMICTMP1_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]] +// AMDGCN_CL_DEF-NEXT: store i32 [[TMP7]], ptr [[ATOMIC_TEMP2_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2_ASCAST]], align 4 +// AMDGCN_CL_DEF-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_DEF-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4 +// AMDGCN_CL_DEF-NEXT: ret void +// +// AMDGCN_CL_20-LABEL: define hidden void @fi8a( +// AMDGCN_CL_20-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] { +// AMDGCN_CL_20-NEXT: [[ENTRY:.*:]] +// AMDGCN_CL_20-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// AMDGCN_CL_20-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4, addrspace(5) +// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4, addrspace(5) +// 
AMDGCN_CL_20-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr +// AMDGCN_CL_20-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr +// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr +// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr +// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr +// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr +// AMDGCN_CL_20-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: store i32 -1, ptr [[DOTATOMICTMP_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]] +// AMDGCN_CL_20-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: store i32 -1, ptr [[DOTATOMICTMP1_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]] +// AMDGCN_CL_20-NEXT: store i32 [[TMP7]], ptr 
[[ATOMIC_TEMP2_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2_ASCAST]], align 4 +// AMDGCN_CL_20-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8 +// AMDGCN_CL_20-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4 +// AMDGCN_CL_20-NEXT: ret void +// +// SPIRV-LABEL: define hidden spir_func void @fi8a( +// SPIRV-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] { +// SPIRV-NEXT: [[ENTRY:.*:]] +// SPIRV-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// SPIRV-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// SPIRV-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4 +// SPIRV-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4 +// SPIRV-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4 +// SPIRV-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4 +// SPIRV-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// SPIRV-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// SPIRV-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// SPIRV-NEXT: store i32 -1, ptr [[DOTATOMICTMP]], align 4 +// SPIRV-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4 +// SPIRV-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("device") monotonic, align 4 +// SPIRV-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4 +// SPIRV-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4 +// SPIRV-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// SPIRV-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 +// SPIRV-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// SPIRV-NEXT: store i32 -1, ptr [[DOTATOMICTMP1]], align 4 +// SPIRV-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1]], align 4 +// SPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("device") monotonic, align 4 +// SPIRV-NEXT: store i32 [[TMP7]], ptr [[ATOMIC_TEMP2]], align 4 +// SPIRV-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2]], align 4 +// SPIRV-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8 
+// SPIRV-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4 +// SPIRV-NEXT: ret void +// +void fi8a(unsigned int *a, unsigned int *b) { + *b = __scoped_atomic_uinc_wrap(b, ~0U, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE); + *a = __scoped_atomic_udec_wrap(a, ~0U, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE); +} + //. // AMDGCN_CL_DEF: [[META3]] = !{} //. diff --git a/clang/test/CodeGenCUDA/cuda_weak_alias.cu b/clang/test/CodeGenCUDA/cuda_weak_alias.cu new file mode 100644 index 0000000000000..796493445d363 --- /dev/null +++ b/clang/test/CodeGenCUDA/cuda_weak_alias.cu @@ -0,0 +1,17 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 6 +// RUN: %clang_cc1 -x cuda -triple x86_64-unknown-linux-gnu -aux-triple nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=HOST + +extern "C" { + +//. +// HOST: @HostFunc = weak alias i32 (), ptr @__HostFunc +//. +// HOST-LABEL: define dso_local i32 @__HostFunc( +// HOST-SAME: ) #[[ATTR0:[0-9]+]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 42 +// +int __HostFunc(void) { return 42; } +int HostFunc(void) __attribute__ ((weak, alias("__HostFunc"))); + +} diff --git a/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp b/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp index 3e9328ac0e3ca..2451d31e9a489 100644 --- a/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp +++ b/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp @@ -9,6 +9,11 @@ // RUN: %clang_cc1 -fsanitize=cfi-vcall -fno-sanitize-trap=cfi-vcall -fsanitize-recover=cfi-vcall -fsanitize-minimal-runtime -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables -emit-llvm -o - %s | FileCheck --check-prefix=RECOVER_MIN %s +// RUN: %clang_cc1 -fsanitize=cfi-vcall -fno-sanitize-trap=cfi-vcall -fsanitize-recover=cfi-vcall -fsanitize-minimal-runtime -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables 
-fsanitize-handler-preserve-all-regs -emit-llvm -o - %s | FileCheck --check-prefix=PRESERVE_MIN %s + +// RUN: %clang_cc1 -fsanitize=cfi-vcall -fno-sanitize-trap=cfi-vcall -fsanitize-minimal-runtime -flto -fvisibility=hidden -triple x86_64-unknown-linux -fwhole-program-vtables -fsanitize-handler-preserve-all-regs -emit-llvm -o - %s | FileCheck --check-prefix=ABORT_MIN %s + + struct S1 { virtual void f(); }; @@ -111,6 +116,25 @@ struct S1 { // RECOVER_MIN-NEXT: call void [[TMP3]](ptr noundef nonnull align 8 dereferenceable(8) [[TMP0]]) // RECOVER_MIN-NEXT: ret void // +// PRESERVE_MIN-LABEL: define hidden void @_Z3s1fP2S1( +// PRESERVE_MIN-SAME: ptr noundef [[S1:%.*]]) #[[ATTR0:[0-9]+]] { +// PRESERVE_MIN-NEXT: [[ENTRY:.*:]] +// PRESERVE_MIN-NEXT: [[S1_ADDR:%.*]] = alloca ptr, align 8 +// PRESERVE_MIN-NEXT: store ptr [[S1]], ptr [[S1_ADDR]], align 8 +// PRESERVE_MIN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S1_ADDR]], align 8 +// PRESERVE_MIN-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[TMP0]], align 8 +// PRESERVE_MIN-NEXT: [[TMP1:%.*]] = call i1 @llvm.type.test(ptr [[VTABLE]], metadata !"_ZTS2S1"), !nosanitize [[META5:![0-9]+]] +// PRESERVE_MIN-NEXT: [[TMP2:%.*]] = call i1 @llvm.type.test(ptr [[VTABLE]], metadata !"all-vtables"), !nosanitize [[META5]] +// PRESERVE_MIN-NEXT: br i1 [[TMP1]], label %[[CONT:.*]], label %[[HANDLER_CFI_CHECK_FAIL:.*]], !prof [[PROF6:![0-9]+]], !nosanitize [[META5]] +// PRESERVE_MIN: [[HANDLER_CFI_CHECK_FAIL]]: +// PRESERVE_MIN-NEXT: call preserve_allcc void @__ubsan_handle_cfi_check_fail_minimal_preserve() #[[ATTR3:[0-9]+]], !nosanitize [[META5]] +// PRESERVE_MIN-NEXT: br label %[[CONT]], !nosanitize [[META5]] +// PRESERVE_MIN: [[CONT]]: +// PRESERVE_MIN-NEXT: [[VFN:%.*]] = getelementptr inbounds ptr, ptr [[VTABLE]], i64 0 +// PRESERVE_MIN-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VFN]], align 8 +// PRESERVE_MIN-NEXT: call void [[TMP3]](ptr noundef nonnull align 8 dereferenceable(8) [[TMP0]]) +// PRESERVE_MIN-NEXT: ret void +// void s1f(S1 *s1) { 
s1->f(); } @@ -130,3 +154,6 @@ void s1f(S1 *s1) { // RECOVER_MIN: [[META5]] = !{} // RECOVER_MIN: [[PROF6]] = !{!"branch_weights", i32 1048575, i32 1} //. +// PRESERVE_MIN: [[META5]] = !{} +// PRESERVE_MIN: [[PROF6]] = !{!"branch_weights", i32 1048575, i32 1} +//. diff --git a/clang/test/CodeGenHIP/hip_weak_alias.cpp b/clang/test/CodeGenHIP/hip_weak_alias.cpp new file mode 100644 index 0000000000000..3cc9a2bd09fc0 --- /dev/null +++ b/clang/test/CodeGenHIP/hip_weak_alias.cpp @@ -0,0 +1,125 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 6 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -x hip -triple x86_64-unknown-linux-gnu -aux-triple amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-host.bc +// RUN: %clang_cc1 -x hip -triple x86_64-unknown-linux-gnu -aux-triple amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=HOST +// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -aux-triple x86_64-unknown-linux-gnu -emit-llvm %s -fcuda-is-device -o - | FileCheck %s --check-prefix=DEVICE + +#define __device__ __attribute__((device)) +#define __host__ __attribute__((host)) + +extern "C" { +//. +// HOST: @HostFunc = weak alias i32 (), ptr @__HostFunc +// HOST: @HostFunc_ = alias i32 (), ptr @__HostFunc +// HOST: @HostVar = weak alias i32, ptr @__HostVar +// HOST: @HostVar_ = alias i32, ptr @__HostVar +// HOST: @Two = weak alias i32 (), ptr @__Two +// HOST: @Two_ = alias i32 (), ptr @__Two +// HOST: @_Z5Threev = weak alias i32 (), ptr @_Z7__Threev +// HOST: @_Z6Three_v = alias i32 (), ptr @_Z7__Threev +// HOST: @_Z4Fourv = weak alias i32 (), ptr @_Z6__Fourv +// HOST: @_Z4Fourf = weak alias float (float), ptr @_Z6__Fourf +//. 
+// DEVICE: @One = weak alias i32 (), ptr @__One +// DEVICE: @One_ = alias i32 (), ptr @__One +// DEVICE: @Two = weak alias i32 (), ptr @__Two +// DEVICE: @Two_ = alias i32 (), ptr @__Two +// DEVICE: @_Z5Threev = weak alias i32 (), ptr @_Z7__Threev +// DEVICE: @_Z6Three_v = alias i32 (), ptr @_Z7__Threev +// DEVICE: @_Z4Fourv = weak alias i32 (), ptr @_Z6__Fourv +// DEVICE: @_Z4Fourf = weak alias float (float), ptr @_Z6__Fourf +//. +// HOST-LABEL: define dso_local i32 @__HostFunc( +// HOST-SAME: ) #[[ATTR0:[0-9]+]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 42 +// +int __HostFunc(void) { return 42; } +int __HostVar = 1; +int HostFunc(void) __attribute__((weak, alias("__HostFunc"))); +int HostFunc_(void) __attribute__((alias("__HostFunc"))); +extern int __attribute__((weak, alias("__HostVar"))) HostVar; +extern int __attribute__((alias("__HostVar"))) HostVar_; + +// DEVICE-LABEL: define dso_local i32 @__One( +// DEVICE-SAME: ) #[[ATTR0:[0-9]+]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 1 +// +__device__ int __One(void) { return 1; } +__device__ int One(void) __attribute__((weak, alias("__One"))); +__device__ int One_(void) __attribute__((alias("__One"))); + +// HOST-LABEL: define dso_local i32 @__Two( +// HOST-SAME: ) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 2 +// +// DEVICE-LABEL: define dso_local i32 @__Two( +// DEVICE-SAME: ) #[[ATTR0]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 2 +// +__host__ __device__ int __Two(void) { return 2; } +__host__ __device__ int Two(void) __attribute__((weak, alias("__Two"))); +__host__ __device__ int Two_(void) __attribute__((alias("__Two"))); +} + +// 
HOST-LABEL: define linkonce_odr noundef i32 @_Z7__Threev( +// HOST-SAME: ) #[[ATTR0]] comdat { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 5 +// +// DEVICE-LABEL: define linkonce_odr noundef i32 @_Z7__Threev( +// DEVICE-SAME: ) #[[ATTR0]] comdat { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 5 +// +__host__ __device__ constexpr int __Three(void) { return 5; } +__host__ __device__ int Three(void) __attribute__((weak, alias("_Z7__Threev"))); +__host__ __device__ int Three_(void) __attribute__((alias("_Z7__Threev"))); + + +// HOST-LABEL: define dso_local noundef i32 @_Z6__Fourv( +// HOST-SAME: ) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 2 +// +// DEVICE-LABEL: define dso_local noundef i32 @_Z6__Fourv( +// DEVICE-SAME: ) #[[ATTR0]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 2 +// +__host__ __device__ int __Four(void) { return 2; } +// HOST-LABEL: define dso_local noundef float @_Z6__Fourf( +// HOST-SAME: float noundef [[F:%.*]]) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: [[F_ADDR:%.*]] = alloca float, align 4 +// HOST-NEXT: store float [[F]], ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[MUL:%.*]] = fmul contract float 2.000000e+00, [[TMP0]] +// HOST-NEXT: ret float [[MUL]] +// +// DEVICE-LABEL: define dso_local noundef float @_Z6__Fourf( +// DEVICE-SAME: float noundef [[F:%.*]]) #[[ATTR0]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5) +// DEVICE-NEXT: [[F_ADDR:%.*]] = alloca float, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr 
addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: [[F_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F_ADDR]] to ptr +// DEVICE-NEXT: store float [[F]], ptr [[F_ADDR_ASCAST]], align 4 +// DEVICE-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR_ASCAST]], align 4 +// DEVICE-NEXT: [[MUL:%.*]] = fmul contract float 2.000000e+00, [[TMP0]] +// DEVICE-NEXT: ret float [[MUL]] +// +__host__ __device__ float __Four(float f) { return 2.0f * f; } +__host__ __device__ int Four(void) __attribute__((weak, alias("_Z6__Fourv"))); +__host__ __device__ float Four(float f) __attribute__((weak, alias("_Z6__Fourf"))); diff --git a/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl index d0ba8f447b732..ec03804ad1a4c 100644 --- a/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl +++ b/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl @@ -101,10 +101,16 @@ void funky(inout int3 X) { // Call the function with the temporary. // CHECK: call void {{.*}}funky{{.*}}(ptr noalias noundef nonnull align 16 dereferenceable(16) [[ArgTmp]]) -// Shuffle it back. +// Write it back. 
// CHECK: [[RetVal:%.*]] = load <3 x i32>, ptr [[ArgTmp]] -// CHECK: [[Vxyz:%.*]] = shufflevector <3 x i32> [[RetVal]], <3 x i32> poison, <3 x i32> -// CHECK: store <3 x i32> [[Vxyz]], ptr [[V]] +// CHECK: [[Src0:%.*]] = extractelement <3 x i32> [[RetVal]], i32 0 +// CHECK: [[PtrY:%.*]] = getelementptr <3 x i32>, ptr %V, i32 0, i32 1 +// CHECK: store i32 [[Src0]], ptr [[PtrY]], align 4 +// CHECK: [[Src1:%.*]] = extractelement <3 x i32> [[RetVal]], i32 1 +// CHECK: [[PtrZ:%.*]] = getelementptr <3 x i32>, ptr %V, i32 0, i32 2 +// CHECK: store i32 [[Src1]], ptr [[PtrZ]], align 4 +// CHECK: [[Src2:%.*]] = extractelement <3 x i32> [[RetVal]], i32 2 +// CHECK: store i32 [[Src2]], ptr %V, align 4 // OPT: ret <3 x i32> export int3 case4() { diff --git a/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl b/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl index 7804239edccae..270598265c660 100644 --- a/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl +++ b/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl @@ -259,9 +259,8 @@ bool AssignBool(bool V) { // CHECK-NEXT: [[B:%.*]] = load i32, ptr [[VAddr]], align 4 // CHECK-NEXT: [[LV1:%.*]] = trunc i32 [[B]] to i1 // CHECK-NEXT: [[D:%.*]] = zext i1 [[LV1]] to i32 -// CHECK-NEXT: [[C:%.*]] = load <2 x i32>, ptr [[X]], align 8 -// CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[C]], i32 [[D]], i32 1 -// CHECK-NEXT: store <2 x i32> [[E]], ptr [[X]], align 8 +// CHECK-NEXT: [[C:%.*]] = getelementptr <2 x i32>, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store i32 [[D]], ptr [[C]], align 4 // CHECK-NEXT: ret void void AssignBool2(bool V) { bool2 X = true.xx; @@ -277,10 +276,13 @@ void AssignBool2(bool V) { // CHECK-NEXT: [[Z:%.*]] = load <2 x i32>, ptr [[VAddr]], align 8 // CHECK-NEXT: [[LV:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1> // CHECK-NEXT: [[B:%.*]] = zext <2 x i1> [[LV]] to <2 x i32> -// CHECK-NEXT: [[A:%.*]] = load <2 x i32>, ptr [[X]], align 8 -// CHECK-NEXT: [[C:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> 
poison, <2 x i32> -// CHECK-NEXT: store <2 x i32> [[C]], ptr [[X]], align 8 +// CHECK-NEXT: [[V1:%.*]] = extractelement <2 x i32> [[B]], i32 0 +// CHECK-NEXT: store i32 [[V1]], ptr [[X]], align 4 +// CHECK-NEXT: [[V2:%.*]] = extractelement <2 x i32> [[B]], i32 1 +// CHECK-NEXT: [[X2:%.*]] = getelementptr <2 x i32>, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store i32 [[V2]], ptr [[X2]], align 4 // CHECK-NEXT: ret void + void AssignBool3(bool2 V) { bool2 X = {true,true}; X.xy = V; @@ -313,10 +315,13 @@ bool2 AccessBools() { // CHECK-NEXT: [[L1:%.*]] = shufflevector <1 x i32> [[L0]], <1 x i32> poison, <3 x i32> zeroinitializer // CHECK-NEXT: [[TruncV:%.*]] = trunc <3 x i32> [[L1]] to <3 x i1> // CHECK-NEXT: [[L2:%.*]] = zext <3 x i1> [[TruncV]] to <3 x i32> -// CHECK-NEXT: [[L3:%.*]] = load <4 x i32>, ptr [[B]], align 16 -// CHECK-NEXT: [[L4:%.*]] = shufflevector <3 x i32> [[L2]], <3 x i32> poison, <4 x i32> -// CHECK-NEXT: [[L5:%.*]] = shufflevector <4 x i32> [[L3]], <4 x i32> [[L4]], <4 x i32> -// CHECK-NEXT: store <4 x i32> [[L5]], ptr [[B]], align 16 +// CHECK-NEXT: [[V1:%.*]] = extractelement <3 x i32> [[L2]], i32 0 +// CHECK-NEXT: store i32 [[V1]], ptr %B, align 4 +// CHECK-NEXT: [[V2:%.*]] = extractelement <3 x i32> [[L2]], i32 1 +// CHECK-NEXT: [[B2:%.*]] = getelementptr <4 x i32>, ptr %B, i32 0, i32 1 +// CHECK-NEXT: store i32 [[V2]], ptr [[B2]], align 4 +// CHECK-NEXT: [[V3:%.*]] = extractelement <3 x i32> [[L2]], i32 2 +// CHECK-NEXT: [[B3:%.*]] = getelementptr <4 x i32>, ptr %B, i32 0, i32 2 void BoolSizeMismatch() { bool4 B = {true,true,true,true}; B.xyz = false.xxx; diff --git a/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl b/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl new file mode 100644 index 0000000000000..c632e795098ea --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl @@ -0,0 +1,96 @@ +// RUN: %clang_cc1 -finclude-default-header -fnative-half-type \ +// RUN: -triple dxil-pc-shadermodel6.3-library %s 
-disable-llvm-passes \ +// RUN: -emit-llvm -o - | FileCheck %s + +// CHECK-LABEL: Single + +// Setup local vars. +// CHECK: [[VecAddr:%.*]] = alloca <3 x i64>, align 32 +// CHECK-NEXT: [[AAddr:%.*]] = alloca i64, align 8 +// CHECK-NEXT: store <3 x i64> %vec, ptr [[VecAddr]], align 32 +// CHECK-NEXT: store i64 %a, ptr [[AAddr]], align 8 + +// Update single element of the vector. +// CHECK-NEXT: [[A:%.*]] = load i64, ptr [[AAddr]], align 8 +// CHECK-NEXT: [[Vy:%.*]] = getelementptr <3 x i64>, ptr [[VecAddr]], i32 0, i32 1 +// CHECK-NEXT: store i64 [[A]], ptr [[Vy]], align 8 + +// Return. +// CHECK-NEXT: [[RetVal:%.*]] = load <3 x i64>, ptr [[VecAddr]], align 32 +// CHECK-NEXT: ret <3 x i64> [[RetVal]] +uint64_t3 Single(uint64_t3 vec, uint64_t a){ + vec.y = a; + return vec; +} + +// CHECK-LABEL: Double + +// Setup local vars. +// CHECK: [[VecAddr:%.*]] = alloca <3 x float>, align 16 +// CHECK-NEXT: [[AAddr:%.*]] = alloca float, align 4 +// CHECK-NEXT: [[BAddr:%.*]] = alloca float, align 4 +// CHECK-NEXT: store <3 x float> %vec, ptr [[VecAddr]], align 16 +// CHECK-NEXT: store float %a, ptr [[AAddr]], align 4 +// CHECK-NEXT: store float %b, ptr [[BAddr]], align 4 + +// Create temporary vector {a, b}. +// CHECK-NEXT: [[A:%.*]] = load float, ptr [[AAddr]], align 4 +// CHECK-NEXT: [[TmpVec0:%.*]] = insertelement <2 x float> poison, float [[A]], i32 0 +// CHECK-NEXT: [[B:%.*]] = load float, ptr [[BAddr]], align 4 +// CHECK-NEXT: [[TmpVec1:%.*]] = insertelement <2 x float> [[TmpVec0]], float [[B]], i32 1 + +// Update two elements of the vector from temporary vector. 
+// CHECK-NEXT: [[TmpX:%.*]] = extractelement <2 x float> [[TmpVec1]], i32 0 +// CHECK-NEXT: [[VecZ:%.*]] = getelementptr <3 x float>, ptr [[VecAddr]], i32 0, i32 2 +// CHECK-NEXT: store float [[TmpX]], ptr [[VecZ]], align 4 +// CHECK-NEXT: [[TmpY:%.*]] = extractelement <2 x float> [[TmpVec1]], i32 1 +// CHECK-NEXT: [[VecY:%.*]] = getelementptr <3 x float>, ptr [[VecAddr]], i32 0, i32 1 +// CHECK-NEXT: store float [[TmpY]], ptr [[VecY]], align 4 + +// Return. +// CHECK-NEXT: [[RetVal:%.*]] = load <3 x float>, ptr [[VecAddr]], align 16 +// CHECK-NEXT: ret <3 x float> [[RetVal]] +float3 Double(float3 vec, float a, float b) { + vec.zy = {a, b}; + return vec; +} + +// CHECK-LABEL: Shuffle + +// Setup local vars. +// CHECK: [[VecAddr:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[AAddr:%.*]] = alloca half, align 2 +// CHECK-NEXT: [[BAddr:%.*]] = alloca half, align 2 +// CHECK-NEXT: store <4 x half> %vec, ptr [[VecAddr]], align 8 +// CHECK-NEXT: store half %a, ptr [[AAddr]], align 2 +// CHECK-NEXT: store half %b, ptr [[BAddr]], align 2 + +// Create temporary vector {a, b, 13.74, a}. +// CHECK-NEXT: [[A:%.*]] = load half, ptr [[AAddr]], align 2 +// CHECK-NEXT: [[TmpVec0:%.*]] = insertelement <4 x half> poison, half [[A]], i32 0 +// CHECK-NEXT: [[B:%.*]] = load half, ptr [[BAddr]], align 2 +// CHECK-NEXT: [[TmpVec1:%.*]] = insertelement <4 x half> [[TmpVec0]], half [[B]], i32 1 +// CHECK-NEXT: [[TmpVec2:%.*]] = insertelement <4 x half> %vecinit1, half 0xH4ADF, i32 2 +// CHECK-NEXT: [[A:%.*]] = load half, ptr [[AAddr]], align 2 +// CHECK-NEXT: [[TmpVec3:%.*]] = insertelement <4 x half> [[TmpVec2]], half [[A]], i32 3 + +// Update four elements of the vector via mixed up swizzle from the temporary vector. 
+// CHECK-NEXT: [[TmpX:%.*]] = extractelement <4 x half> [[TmpVec3]], i32 0 +// CHECK-NEXT: [[VecZ:%.*]] = getelementptr <4 x half>, ptr [[VecAddr]], i32 0, i32 2 +// CHECK-NEXT: store half [[TmpX]], ptr [[VecZ]], align 2 +// CHECK-NEXT: [[TmpY:%.*]] = extractelement <4 x half> [[TmpVec3]], i32 1 +// CHECK-NEXT: [[VecW:%.*]] = getelementptr <4 x half>, ptr [[VecAddr]], i32 0, i32 3 +// CHECK-NEXT: store half [[TmpY]], ptr [[VecW]], align 2 +// CHECK-NEXT: [[TmpZ:%.*]] = extractelement <4 x half> [[TmpVec3]], i32 2 +// CHECK-NEXT: store half [[TmpZ]], ptr [[VecAddr]], align 2 +// CHECK-NEXT: [[TmpW:%.*]] = extractelement <4 x half> [[TmpVec3]], i32 3 +// CHECK-NEXT: [[VecY:%.*]] = getelementptr <4 x half>, ptr [[VecAddr]], i32 0, i32 1 +// CHECK-NEXT: store half [[TmpW]], ptr [[VecY]], align 2 + +// Return. +// CHECK-NEXT: [[RetVal:%.*]] = load <4 x half>, ptr [[VecAddr]], align 8 +// CHECK-NEXT: ret <4 x half> [[RetVal]] +half4 Shuffle(half4 vec, half a, half b) { + vec.zwxy = {a, b, 13.74, a}; + return vec; +} diff --git a/clang/test/CodeGenHLSL/builtins/faceforward.hlsl b/clang/test/CodeGenHLSL/builtins/faceforward.hlsl index 70459d81685a1..261454e8bc152 100644 --- a/clang/test/CodeGenHLSL/builtins/faceforward.hlsl +++ b/clang/test/CodeGenHLSL/builtins/faceforward.hlsl @@ -1,9 +1,9 @@ // RUN: %clang_cc1 -finclude-default-header -triple \ // RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type -fnative-int16-type \ -// RUN: -emit-llvm -o - | FileCheck %s +// RUN: -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,DXCHECK // RUN: %clang_cc1 -finclude-default-header -triple \ // RUN: spirv-unknown-vulkan-compute %s -fnative-half-type -fnative-int16-type \ -// RUN: -emit-llvm -o - | FileCheck %s --check-prefix=SPVCHECK +// RUN: -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,SPVCHECK // CHECK-LABEL: test_faceforward_half // CHECK: %hlsl.dot.i = fmul reassoc nnan ninf nsz arcp afn half %{{.*}}, %{{.*}} @@ -11,42 +11,31 @@ // CHECK: %fneg.i = fneg reassoc 
nnan ninf nsz arcp afn half %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, half %{{.*}}, half %fneg.i // CHECK: ret half %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef half @llvm.spv.faceforward.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}) -// SPVCHECK: ret half %spv.faceforward.i half test_faceforward_half(half N, half I, half Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half2 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) +// DXCHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF:dx]].fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) +// SPVCHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF:spv]].fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <2 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <2 x half> %{{.*}}, <2 x half> %fneg.i // CHECK: ret <2 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half2 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <2 x half> @llvm.spv.faceforward.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}) -// SPVCHECK: ret <2 x half> %spv.faceforward.i half2 test_faceforward_half2(half2 N, half2 I, half2 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half3 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF]].fdot.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: 
%fneg.i = fneg reassoc nnan ninf nsz arcp afn <3 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <3 x half> %{{.*}}, <3 x half> %fneg.i // CHECK: ret <3 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half3 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <3 x half> @llvm.spv.faceforward.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}, <3 x half> %{{.*}}) -// SPVCHECK: ret <3 x half> %spv.faceforward.i half3 test_faceforward_half3(half3 N, half3 I, half3 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half4 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF]].fdot.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <4 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <4 x half> %{{.*}}, <4 x half> %fneg.i // CHECK: ret <4 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half4 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <4 x half> @llvm.spv.faceforward.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x half> %{{.*}}) -// SPVCHECK: ret <4 x half> %spv.faceforward.i half4 test_faceforward_half4(half4 N, half4 I, half4 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float @@ -55,40 +44,28 @@ half4 test_faceforward_half4(half4 N, half4 I, half4 Ng) { return faceforward(N, // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn float %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, float %{{.*}}, float %fneg.i // CHECK: ret float %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float -// SPVCHECK: %spv.faceforward.i = call reassoc nnan 
ninf nsz arcp afn noundef float @llvm.spv.faceforward.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}) -// SPVCHECK: ret float %spv.faceforward.i float test_faceforward_float(float N, float I, float Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float2 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <2 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <2 x float> %{{.*}}, <2 x float> %fneg.i // CHECK: ret <2 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float2 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <2 x float> @llvm.spv.faceforward.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}, <2 x float> %{{.*}}) -// SPVCHECK: ret <2 x float> %spv.faceforward.i float2 test_faceforward_float2(float2 N, float2 I, float2 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float3 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <3 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <3 x float> %{{.*}}, <3 x float> %fneg.i // CHECK: ret <3 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float3 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <3 x float> 
@llvm.spv.faceforward.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}, <3 x float> %{{.*}}) -// SPVCHECK: ret <3 x float> %spv.faceforward.i float3 test_faceforward_float3(float3 N, float3 I, float3 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float4 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <4 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <4 x float> %{{.*}}, <4 x float> %fneg.i // CHECK: ret <4 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float4 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.spv.faceforward.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}) -// SPVCHECK: ret <4 x float> %spv.faceforward.i float4 test_faceforward_float4(float4 N, float4 I, float4 Ng) { return faceforward(N, I, Ng); } diff --git a/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c index 3ca3aaa0b70f4..0ba4767c8ddda 100644 --- a/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c +++ b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c @@ -59,6 +59,13 @@ // RUN: -debug-info-kind=standalone -dwarf-version=4 \ // RUN: | FileCheck %s -check-prefix=NO-ATTR +// Disabled by feature flag (enabled by default) +// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \ +// RUN: -O1 -disable-llvm-passes \ +// RUN: -debug-info-kind=standalone -dwarf-version=5 \ +// RUN: -gno-call-site-info \ +// RUN: | FileCheck %s -check-prefix=NO-ATTR + // NO-ATTR-NOT: FlagAllCallsDescribed // 
HAS-ATTR-DAG: DISubprogram(name: "declaration1", {{.*}}, spFlags: DISPFlagOptimized) diff --git a/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c index 802ec91b53a0d..5960930c04171 100644 --- a/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c +++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c @@ -6,4 +6,4 @@ int type_mismatch(int *p) { return *p; } // CHECK-LABEL: @type_mismatch // CHECK: call void @llvm.ubsantrap(i8 22) {{.*}}!dbg [[LOC:![0-9]+]] // CHECK: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}}) -// CHECK: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Type mismatch in operation" +// CHECK: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Alignment, null, or object-size error" diff --git a/clang/test/Driver/debug-options.c b/clang/test/Driver/debug-options.c index 45ac450ac8faa..27e2728f15948 100644 --- a/clang/test/Driver/debug-options.c +++ b/clang/test/Driver/debug-options.c @@ -297,6 +297,9 @@ // RUN: %clang -### -g -gno-column-info %s 2>&1 \ // RUN: | FileCheck -check-prefix=NOCI %s // +// RUN: %clang -### -g -gno-call-site-info %s 2>&1 \ +// RUN: | FileCheck -check-prefix=NOCALLSITE %s +// // RUN: %clang -### -g -target x86_64-unknown-unknown %s 2>&1 \ // | FileCheck -check-prefix=CI %s // @@ -426,6 +429,8 @@ // // NOCI-DAG: "-gno-column-info" // +// NOCALLSITE: "-gno-call-site-info" +// // GEXTREFS: "-dwarf-ext-refs" "-fmodule-format=obj" // GEXTREFS: "-debug-info-kind={{standalone|constructor}}" // NOGEXTREFS-NOT: -dwarf-ext-refs diff --git a/clang/test/Driver/fsanitize.c b/clang/test/Driver/fsanitize.c index f2a4d8c50ec23..c02b8828062f2 100644 --- a/clang/test/Driver/fsanitize.c +++ b/clang/test/Driver/fsanitize.c @@ -984,10 +984,20 @@ // CHECK-UBSAN-MINIMAL: 
"-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} // CHECK-UBSAN-MINIMAL: "-fsanitize-minimal-runtime" -// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE -// CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} -// CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize-minimal-runtime" -// CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize-handler-preserve-all-regs +// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-X86-64 +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize-handler-preserve-all-regs + +// RUN: %clang --target=aarch64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64 +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: 
"-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: "-fsanitize-handler-preserve-all-regs + +// RUN: %clang --target=i386-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-I386 +// CHECK-UBSAN-MINIMAL-PRESERVE-I386: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// CHECK-UBSAN-MINIMAL-PRESERVE-I386: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-I386-NOT: "-fsanitize-handler-preserve-all-regs // RUN: %clang --target=x86_64-linux-gnu -fsanitize=integer -fsanitize-trap=integer %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-INTSAN-TRAP // CHECK-INTSAN-TRAP: "-fsanitize-trap=integer-divide-by-zero,shift-base,shift-exponent,signed-integer-overflow,unsigned-integer-overflow,unsigned-shift-base,implicit-unsigned-integer-truncation,implicit-signed-integer-truncation,implicit-integer-sign-change" diff --git a/clang/test/Driver/hip-spirv-backend-bindings.c b/clang/test/Driver/hip-spirv-backend-bindings.c new file mode 100644 index 0000000000000..59b3f4fb54d4c --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-bindings.c @@ -0,0 +1,57 @@ +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s 
--check-prefixes=CHECK-SPIRV-BASE,CHECK-SPIRV + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-BASE,CHECK-SPIRV-RDC + +// CHECK-SPIRV-BASE: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-BASE: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV: # "spirv64-amd-amdhsa" - "Offload::Packager", inputs: ["[[SPV_BC]]"], output: "[[HIP_OUT:.+\.out]]" +// CHECK-SPIRV: # "spirv64-amd-amdhsa" - "Offload::Linker", inputs: ["[[HIP_OUT]]"], output: "[[HIPFB:.+\.hipfb]]" +// CHECK-SPIRV-RDC: # "x86_64-unknown-linux-gnu" - "Offload::Packager", inputs: ["[[SPV_BC]]"], output: "[[HIP_OUT:.+\.out]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[INPUT]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[HIPI]]", "[[HIPFB]]"], output: "[[x86_BC:.+\.bc]]" +// CHECK-SPIRV-RDC: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[HIPI]]", "[[HIP_OUT]]"], output: "[[x86_BC:.+\.bc]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[x86_BC]]"], output: "[[x86_S:.+\.s]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "clang::as", inputs: ["[[x86_S]]"], output: "[[x86_O:.+\.o]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[x86_O]]"], output: "{{.+\.out}}" + +// CHECK-SPIRV # "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[x86_O]]"], output: "[[x86_O:.+\.o]]" +// CHECK-SPIRV # "x86_64-unknown-linux-gnu" - "GNU::Linker", inputs: ["[[x86_O]]"], output: "{{.+\.out}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ 
+// RUN: -use-spirv-backend --offload-device-only -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "[[SPV_OUT:.+\.out]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "AMDGCN::Linker", inputs: ["[[SPV_OUT]]"], output: "{{.+\.hipfb}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "{{.+}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY + +// 
CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "{{.+\.s}}" diff --git a/clang/test/Driver/hip-spirv-backend-opt.c b/clang/test/Driver/hip-spirv-backend-opt.c new file mode 100644 index 0000000000000..10d9a0b01caf3 --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-opt.c @@ -0,0 +1,61 @@ +// This test case validates the behavior of -use-spirv-backend + +// --offload-device-only is always set --- testing interactions with -S and -fgpu-rdc + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-TEXTUAL + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BINARY + +// The new driver's behavior is to emit LLVM IR for --offload-device-only and -fgpu-rdc (independently of SPIR-V). 
+// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -### -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-LL,CHECK-FGPU-RDC + +// The new driver's behavior is to emit LLVM IR for --offload-device-only and -fgpu-rdc (independently of SPIR-V). +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-FGPU-RDC + +// --offload-device-only is always unset --- testing interactions with -S and -fgpu-rdc + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -S -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-FGPU-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -S -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-CLANG-LINKER-WRAPPER + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ 
+// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-CLANG-LINKER-WRAPPER + +// RUN: %clang --no-offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-SPIRV-BACKEND-BINARY-EQ-TRIPLE + +// CHECK-SPIRV-TRANSLATOR-NOT: "{{.*llvm-spirv.*}}" +// CHECK-SPIRV-BACKEND-TEXTUAL: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-S" +// CHECK-SPIRV-BACKEND-BINARY: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-obj" +// CHECK-SPIRV-BACKEND-BC: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-llvm-bc" +// CHECK-SPIRV-BACKEND-LL: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-llvm" +// CHECK-SPIRV-BACKEND-BINARY-EQ-TRIPLE: "{{.*clang(\.exe)?}}" "-cc1" {{.*}}"-triple=spirv64-amd-amdhsa" {{.*}}"-emit-obj" +// CHECK-FGPU-RDC-SAME: {{.*}} "-fgpu-rdc" +// CHECK-CLANG-LINKER-WRAPPER: "{{.*}}clang-linker-wrapper" "--should-extract=amdgcnspirv" {{.*}} "--device-compiler=spirv64-amd-amdhsa=-use-spirv-backend" diff --git a/clang/test/Driver/hip-spirv-backend-phases.c b/clang/test/Driver/hip-spirv-backend-phases.c new file mode 100644 index 0000000000000..d743b8cd50c40 --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-phases.c @@ -0,0 +1,80 @@ +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY + +// CHECK-SPIRV-BINARY: [[P0:[0-9]+]]: input, "[[INPUT:.*]].c", hip, (host-hip) +// 
CHECK-SPIRV-BINARY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (host-hip) +// CHECK-SPIRV-BINARY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (host-hip) + +// CHECK-SPIRV-BINARY: [[P3:[0-9]+]]: input, "[[INPUT]].c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P4:[0-9]+]]: preprocessor, {[[P3]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P5:[0-9]+]]: compiler, {[[P4]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P6:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P5]]}, ir +// CHECK-SPIRV-BINARY: [[P7:[0-9]+]]: llvm-offload-binary, {[[P6]]}, image, (device-hip) +// CHECK-SPIRV-BINARY: [[P8:[0-9]+]]: clang-linker-wrapper, {[[P7]]}, hip-fatbin, (device-hip) + +// CHECK-SPIRV-BINARY: [[P9:[0-9]+]]: offload, "host-hip (x86_64-unknown-linux-gnu)" {[[P2]]}, "device-hip (spirv64-amd-amdhsa)" {[[P8]]}, ir +// CHECK-SPIRV-BINARY: [[P10:[0-9]+]]: backend, {[[P9]]}, assembler, (host-hip) +// CHECK-SPIRV-BINARY: [[P11:[0-9]+]]: assembler, {[[P10]]}, object, (host-hip) +// CHECK-SPIRV-BINARY: [[P12:[0-9]+]]: clang-linker-wrapper, {[[P11]]}, image, (host-hip) + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY-RDC + +// CHECK-SPIRV-BINARY-RDC: [[P0:[0-9]+]]: input, "[[INPUT:.*]].c", hip, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (host-hip) + +// CHECK-SPIRV-BINARY-RDC: [[P3:[0-9]+]]: input, "[[INPUT]].c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-RDC: [[P4:[0-9]+]]: preprocessor, {[[P3]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-RDC: [[P5:[0-9]+]]: compiler, {[[P4]]}, ir, (device-hip, amdgcnspirv) +// 
CHECK-SPIRV-BINARY-RDC: [[P6:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P5]]}, ir +// CHECK-SPIRV-BINARY-RDC: [[P7:[0-9]+]]: llvm-offload-binary, {[[P6]]}, image, (device-hip) + +// CHECK-SPIRV-BINARY-RDC: [[P8:[0-9]+]]: offload, "host-hip (x86_64-unknown-linux-gnu)" {[[P2]]}, "device-hip (x86_64-unknown-linux-gnu)" {[[P7]]}, ir +// CHECK-SPIRV-BINARY-RDC: [[P9:[0-9]+]]: backend, {[[P8]]}, assembler, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P10:[0-9]+]]: assembler, {[[P9]]}, object, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P11:[0-9]+]]: clang-linker-wrapper, {[[P10]]}, image, (host-hip) + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P3:[0-9]+]]: backend, {[[P2]]}, image, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, image +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P5:[0-9]+]]: linker, {[[P4]]}, hip-fatbin, (device-hip) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P6:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa)" {[[P5]]}, none + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s 
--check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P3:[0-9]+]]: backend, {[[P2]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, none + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P3:[0-9]+]]: backend, {[[P2]]}, assembler, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, none diff --git a/clang/test/Driver/riscv-features.c b/clang/test/Driver/riscv-features.c index 1c8b52bd31997..97736ff81c799 100644 --- 
a/clang/test/Driver/riscv-features.c +++ b/clang/test/Driver/riscv-features.c @@ -68,13 +68,6 @@ // DEFAULT-LINUX-SAME: "-target-feature" "+d" // DEFAULT-LINUX-SAME: "-target-feature" "+c" -// RUN: not %clang -c --target=riscv64-linux-gnu -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefix=ERR-SPLIT-DWARF -// RUN: not %clang -c --target=riscv64 -gsplit-dwarf=single %s 2>&1 | FileCheck %s --check-prefix=ERR-SPLIT-DWARF -// RUN: %clang -### -c --target=riscv64 -mno-relax -g -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefix=SPLIT-DWARF - -// ERR-SPLIT-DWARF: error: -gsplit-dwarf{{.*}} is unsupported with RISC-V linker relaxation (-mrelax) -// SPLIT-DWARF: "-split-dwarf-file" - // RUN: %clang -mabi=lp64d --target=riscv64-unknown-fuchsia -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=FUCHSIA // FUCHSIA: "-target-feature" "+m" // FUCHSIA-SAME: "-target-feature" "+a" diff --git a/clang/test/OpenMP/amdgcn_weak_alias.c b/clang/test/OpenMP/amdgcn_weak_alias.c new file mode 100644 index 0000000000000..a9d5c1737b321 --- /dev/null +++ b/clang/test/OpenMP/amdgcn_weak_alias.c @@ -0,0 +1,103 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 6 +// REQUIRES: amdgpu-registered-target + +// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=HOST +// RUN: %clang_cc1 -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=DEVICE + +//. 
+// HOST: @__One_var = global i32 1, align 4 +// HOST: @__Two_var = global i32 2, align 4 +// HOST: @__Three_var = global i32 3, align 4 +// HOST: @.offloading.entry_name = internal unnamed_addr constant [10 x i8] c"__Two_var\00", section ".llvm.rodata.offloading", align 1 +// HOST: @.offloading.entry.__Two_var = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @__Two_var, ptr @.offloading.entry_name, i64 4, i64 0, ptr null }, section "llvm_offload_entries", align 8 +// HOST: @.offloading.entry_name.1 = internal unnamed_addr constant [12 x i8] c"__Three_var\00", section ".llvm.rodata.offloading", align 1 +// HOST: @.offloading.entry.__Three_var = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @__Three_var, ptr @.offloading.entry_name.1, i64 4, i64 0, ptr null }, section "llvm_offload_entries", align 8 +// HOST: @One = weak alias i32 (), ptr @__One +// HOST: @One_ = alias i32 (), ptr @__One +// HOST: @One_var = weak alias i32, ptr @__One_var +// HOST: @One_var_ = alias i32, ptr @__One_var +// HOST: @Two = weak alias i32 (), ptr @__Two +// HOST: @Two_ = alias i32 (), ptr @__Two +// HOST: @Two_var = weak alias i32, ptr @__Two_var +// HOST: @Two_var_ = alias i32, ptr @__Two_var +// HOST: @Three = weak alias i32 (), ptr @__Three +// HOST: @Three_ = alias i32 (), ptr @__Three +// HOST: @Three_var = weak alias i32, ptr @__Three_var +// HOST: @Three_var_ = alias i32, ptr @__Three_var +//. 
+// DEVICE: @__Two_var = addrspace(1) global i32 2, align 4 +// DEVICE: @__Three_var = addrspace(1) global i32 3, align 4 +// DEVICE: @Two = weak hidden alias i32 (), ptr @__Two +// DEVICE: @Two_ = hidden alias i32 (), ptr @__Two +// DEVICE: @Two_var = weak alias i32, addrspacecast (ptr addrspace(1) @__Two_var to ptr) +// DEVICE: @Two_var_ = alias i32, addrspacecast (ptr addrspace(1) @__Two_var to ptr) +// DEVICE: @Three = weak hidden alias i32 (), ptr @__Three +// DEVICE: @Three.1 = weak hidden alias i32 (), ptr @__Three +// DEVICE: @Three_ = hidden alias i32 (), ptr @__Three +// DEVICE: @Three_.2 = hidden alias i32 (), ptr @__Three +// DEVICE: @Three_var = weak alias i32, addrspacecast (ptr addrspace(1) @__Three_var to ptr) +// DEVICE: @Three_var_ = alias i32, addrspacecast (ptr addrspace(1) @__Three_var to ptr) +//. +// HOST-LABEL: define dso_local i32 @__One( +// HOST-SAME: ) #[[ATTR0:[0-9]+]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 1 +// +int __One(void) { return 1; } +int One(void) __attribute__ ((weak, alias("__One"))); +int One_(void) __attribute__ ((alias("__One"))); + +int __One_var = 1; +extern int __attribute__((weak, alias("__One_var"))) One_var; +extern int __attribute__((alias("__One_var"))) One_var_; + +#pragma omp declare target +// HOST-LABEL: define dso_local i32 @__Two( +// HOST-SAME: ) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 2 +// +// DEVICE-LABEL: define hidden i32 @__Two( +// DEVICE-SAME: ) #[[ATTR0:[0-9]+]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 2 +// +int __Two(void) { return 2; } +int Two(void) __attribute__ ((weak, alias("__Two"))); +int Two_(void) __attribute__ ((alias("__Two"))); + +int __Two_var = 2; +extern int __attribute__((weak, alias("__Two_var"))) Two_var; +extern int __attribute__((alias("__Two_var"))) Two_var_; 
+#pragma omp end declare target + +#pragma omp declare target +// HOST-LABEL: define dso_local i32 @__Three( +// HOST-SAME: ) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 3 +// +// DEVICE-LABEL: define hidden i32 @__Three( +// DEVICE-SAME: ) #[[ATTR0]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 3 +// +int __Three(void) { return 3; } +int __Three_var = 3; +#pragma omp end declare target +int Three(void) __attribute__ ((weak, alias("__Three"))); +int Three_(void) __attribute__ ((alias("__Three"))); +extern int __attribute__((weak, alias("__Three_var"))) Three_var; +extern int __attribute__((alias("__Three_var"))) Three_var_; +//. +// HOST: [[META0:![0-9]+]] = !{i32 1, !"__Two_var", i32 0, i32 0} +// HOST: [[META1:![0-9]+]] = !{i32 1, !"__Three_var", i32 0, i32 1} +//. +// DEVICE: [[META0:![0-9]+]] = !{i32 1, !"__Two_var", i32 0, i32 0} +// DEVICE: [[META1:![0-9]+]] = !{i32 1, !"__Three_var", i32 0, i32 1} +//. diff --git a/clang/test/OpenMP/amdgcn_weak_alias.cpp b/clang/test/OpenMP/amdgcn_weak_alias.cpp new file mode 100644 index 0000000000000..d1ffff4b1ca01 --- /dev/null +++ b/clang/test/OpenMP/amdgcn_weak_alias.cpp @@ -0,0 +1,115 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 6 +// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=HOST +// RUN: %clang_cc1 -fopenmp -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=DEVICE + +//. 
+// HOST: @_Z3Onev = weak alias i32 (), ptr @_Z5__Onev +// HOST: @_Z3Onef = weak alias float (float), ptr @_Z5__Onef +// HOST: @_Z4One_v = alias i32 (), ptr @_Z5__Onev +// HOST: @_Z4One_f = alias float (float), ptr @_Z5__Onef +// HOST: @_Z3Twov = weak alias i32 (), ptr @_Z5__Twov +// HOST: @_Z3Twof = weak alias float (float), ptr @_Z5__Twof +// HOST: @_Z4Two_v = alias i32 (), ptr @_Z5__Twov +// HOST: @_Z4Two_f = alias float (float), ptr @_Z5__Twof +// HOST: @_Z5Threev = weak alias i32 (), ptr @_Z7__Threev +// HOST: @_Z6Three_v = alias i32 (), ptr @_Z7__Threev +// HOST: @_Z4Fourv = weak alias i32 (), ptr @_Z6__Fourv +// HOST: @_Z5Four_v = alias i32 (), ptr @_Z6__Fourv +//. +// DEVICE: @_Z3Twov = weak hidden alias i32 (), ptr @_Z5__Twov +// DEVICE: @_Z3Twof = weak hidden alias float (float), ptr @_Z5__Twof +// DEVICE: @_Z4Two_v = hidden alias i32 (), ptr @_Z5__Twov +// DEVICE: @_Z4Two_f = hidden alias float (float), ptr @_Z5__Twof +// DEVICE: @_Z5Threev = weak hidden alias i32 (), ptr @_Z7__Threev +// DEVICE: @_Z6Three_v = hidden alias i32 (), ptr @_Z7__Threev +//. 
+// HOST-LABEL: define dso_local noundef i32 @_Z5__Onev( +// HOST-SAME: ) #[[ATTR0:[0-9]+]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 1 +// +int __One(void) { return 1; } +// HOST-LABEL: define dso_local noundef float @_Z5__Onef( +// HOST-SAME: float noundef [[F:%.*]]) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: [[F_ADDR:%.*]] = alloca float, align 4 +// HOST-NEXT: store float [[F]], ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[MUL:%.*]] = fmul float 1.000000e+00, [[TMP0]] +// HOST-NEXT: ret float [[MUL]] +// +float __One(float f) { return 1.0f * f; } +int One(void) __attribute__((weak, alias("_Z5__Onev"))); +float One(float f) __attribute__((weak, alias("_Z5__Onef"))); +int One_(void) __attribute__((alias("_Z5__Onev"))); +float One_(float f) __attribute__((alias("_Z5__Onef"))); + +#pragma omp declare target +// HOST-LABEL: define dso_local noundef i32 @_Z5__Twov( +// HOST-SAME: ) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 2 +// +// DEVICE-LABEL: define hidden noundef i32 @_Z5__Twov( +// DEVICE-SAME: ) #[[ATTR0:[0-9]+]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 2 +// +int __Two(void) { return 2; } +// HOST-LABEL: define dso_local noundef float @_Z5__Twof( +// HOST-SAME: float noundef [[F:%.*]]) #[[ATTR0]] { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: [[F_ADDR:%.*]] = alloca float, align 4 +// HOST-NEXT: store float [[F]], ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4 +// HOST-NEXT: [[MUL:%.*]] = fmul float 2.000000e+00, [[TMP0]] +// HOST-NEXT: ret float [[MUL]] +// +// DEVICE-LABEL: define hidden noundef float @_Z5__Twof( +// DEVICE-SAME: float noundef [[F:%.*]]) #[[ATTR0]] { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = 
alloca float, align 4, addrspace(5) +// DEVICE-NEXT: [[F_ADDR:%.*]] = alloca float, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: [[F_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F_ADDR]] to ptr +// DEVICE-NEXT: store float [[F]], ptr [[F_ADDR_ASCAST]], align 4 +// DEVICE-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR_ASCAST]], align 4 +// DEVICE-NEXT: [[MUL:%.*]] = fmul float 2.000000e+00, [[TMP0]] +// DEVICE-NEXT: ret float [[MUL]] +// +float __Two(float f) { return 2.0f * f; } +int Two(void) __attribute__((weak, alias("_Z5__Twov"))); +float Two(float f) __attribute__((weak, alias("_Z5__Twof"))); +int Two_(void) __attribute__((alias("_Z5__Twov"))); +float Two_(float f) __attribute__((alias("_Z5__Twof"))); +#pragma omp end declare target + +#pragma omp declare target +// HOST-LABEL: define linkonce_odr noundef i32 @_Z7__Threev( +// HOST-SAME: ) #[[ATTR0]] comdat { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 3 +// +// DEVICE-LABEL: define linkonce_odr hidden noundef i32 @_Z7__Threev( +// DEVICE-SAME: ) #[[ATTR0]] comdat { +// DEVICE-NEXT: [[ENTRY:.*:]] +// DEVICE-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5) +// DEVICE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr +// DEVICE-NEXT: ret i32 3 +// +constexpr int __Three(void) { return 3; } +int Three(void) __attribute__((weak, alias("_Z7__Threev"))); +int Three_(void) __attribute__((alias("_Z7__Threev"))); +#pragma omp end declare target + +// HOST-LABEL: define linkonce_odr noundef i32 @_Z6__Fourv( +// HOST-SAME: ) #[[ATTR0]] comdat { +// HOST-NEXT: [[ENTRY:.*:]] +// HOST-NEXT: ret i32 4 +// +constexpr int __Four(void) { return 4; } +int Four(void) __attribute__((weak, alias("_Z6__Fourv"))); +int Four_(void) __attribute__((alias("_Z6__Fourv"))); diff --git a/clang/test/OpenMP/nvptx_weak_alias.c b/clang/test/OpenMP/nvptx_weak_alias.c new file mode 100644 index 
0000000000000..e5e1b4409a5a5 --- /dev/null +++ b/clang/test/OpenMP/nvptx_weak_alias.c @@ -0,0 +1,22 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 6 +// REQUIRES: nvptx-registered-target + +// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s + +//. +// CHECK: @One = weak alias i32 (), ptr @__One +//. +// CHECK-LABEL: define dso_local i32 @__One( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: ret i32 1 +// +int __One(void) { return 1; } +int One(void) __attribute__ ((weak, alias("__One"))); +//. +// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" } +//. +// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4} +// CHECK: [[META1:![0-9]+]] = !{i32 7, !"openmp", i32 51} +// CHECK: [[META2:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"} +//. 
diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c index 4dea1b583a089..32c758699120e 100644 --- a/clang/test/Preprocessor/init.c +++ b/clang/test/Preprocessor/init.c @@ -1106,19 +1106,19 @@ // SPARC:#define __INT_LEAST8_MAX__ 127 // SPARC:#define __INT_LEAST8_TYPE__ signed char // SPARC:#define __INT_MAX__ 2147483647 -// SPARC:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L -// SPARC:#define __LDBL_DIG__ 15 -// SPARC:#define __LDBL_EPSILON__ 2.2204460492503131e-16L +// SPARC:#define __LDBL_DENORM_MIN__ 6.47517511943802511092443895822764655e-4966L +// SPARC:#define __LDBL_DIG__ 33 +// SPARC:#define __LDBL_EPSILON__ 1.92592994438723585305597794258492732e-34L // SPARC:#define __LDBL_HAS_DENORM__ 1 // SPARC:#define __LDBL_HAS_INFINITY__ 1 // SPARC:#define __LDBL_HAS_QUIET_NAN__ 1 -// SPARC:#define __LDBL_MANT_DIG__ 53 -// SPARC:#define __LDBL_MAX_10_EXP__ 308 -// SPARC:#define __LDBL_MAX_EXP__ 1024 -// SPARC:#define __LDBL_MAX__ 1.7976931348623157e+308L -// SPARC:#define __LDBL_MIN_10_EXP__ (-307) -// SPARC:#define __LDBL_MIN_EXP__ (-1021) -// SPARC:#define __LDBL_MIN__ 2.2250738585072014e-308L +// SPARC:#define __LDBL_MANT_DIG__ 113 +// SPARC:#define __LDBL_MAX_10_EXP__ 4932 +// SPARC:#define __LDBL_MAX_EXP__ 16384 +// SPARC:#define __LDBL_MAX__ 1.18973149535723176508575932662800702e+4932L +// SPARC:#define __LDBL_MIN_10_EXP__ (-4931) +// SPARC:#define __LDBL_MIN_EXP__ (-16381) +// SPARC:#define __LDBL_MIN__ 3.36210314311209350626267781732175260e-4932L // SPARC:#define __LONG_LONG_MAX__ 9223372036854775807LL // SPARC:#define __LONG_MAX__ 2147483647L // SPARC-NOT:#define __LP64__ @@ -1134,7 +1134,7 @@ // SPARC:#define __SIZEOF_DOUBLE__ 8 // SPARC:#define __SIZEOF_FLOAT__ 4 // SPARC:#define __SIZEOF_INT__ 4 -// SPARC:#define __SIZEOF_LONG_DOUBLE__ 8 +// SPARC:#define __SIZEOF_LONG_DOUBLE__ 16 // SPARC:#define __SIZEOF_LONG_LONG__ 8 // SPARC:#define __SIZEOF_LONG__ 4 // SPARC:#define __SIZEOF_POINTER__ 4 diff --git 
a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c index 27feeb57b5de2..1e38b4d3ba350 100644 --- a/clang/test/Preprocessor/predefined-arch-macros.c +++ b/clang/test/Preprocessor/predefined-arch-macros.c @@ -4210,6 +4210,11 @@ // CHECK_SPARC-NOT: #define __sparcv9 1 // CHECK_SPARC-NOT: #define __sparcv9__ 1 +// RUN: %clang -E -dM %s -o - 2>&1 \ +// RUN: -target sparc-unknown-linux \ +// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_SPARC_LDBL +// CHECK_SPARC_LDBL: #define __LONG_DOUBLE_128__ 1 + // RUN: %clang -mcpu=v9 -E -dM %s -o - 2>&1 \ // RUN: -target sparc-unknown-linux \ // RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_SPARC-V9 diff --git a/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp b/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp new file mode 100644 index 0000000000000..0ca55e6268658 --- /dev/null +++ b/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 %s -fsyntax-only -triple aarch64-none-linux-gnu -target-feature +sve -verify + +typedef int fixed_vector __attribute__((vector_size(4))); + +auto error_fixed_vector_result(__SVBool_t svbool, fixed_vector a, fixed_vector b) { + // expected-error@+1 {{vector condition type '__SVBool_t' and result type 'fixed_vector' (vector of 1 'int' value) do not have the same number of elements}} + return svbool ? a : b; +} + +auto error_void_result(__SVBool_t svbool) { + // expected-error@+1 {{GNU vector conditional operand cannot be void}} + return svbool ? (void)0 : (void)1; +} + +auto error_sve_splat_result_unsupported(__SVBool_t svbool, long long a, long long b) { + // expected-error@+1 {{scalar type 'long long' not supported with vector condition type '__SVBool_t'}} + return svbool ? 
a : b; +} + +auto error_sve_vector_result_matched_element_count(__SVBool_t svbool, __SVUint32_t a, __SVUint32_t b) { + // expected-error@+1 {{vector condition type '__SVBool_t' and result type '__SVUint32_t' do not have the same number of elements}} + return svbool ? a : b; +} + +// The following cases should be supported: + +__SVBool_t cond_svbool(__SVBool_t a, __SVBool_t b) { + return a < b ? a : b; +} + +__SVFloat32_t cond_svf32(__SVFloat32_t a, __SVFloat32_t b) { + return a < b ? a : b; +} + +__SVUint64_t cond_u64_splat(__SVUint64_t a) { + return a < 1ul ? a : 1ul; +} diff --git a/clang/test/Sema/scoped-atomic-ops.c b/clang/test/Sema/scoped-atomic-ops.c index 33044aa256cb0..23512a912c75c 100644 --- a/clang/test/Sema/scoped-atomic-ops.c +++ b/clang/test/Sema/scoped-atomic-ops.c @@ -31,7 +31,7 @@ void fi2b(int *i) { __scoped_atomic_store_n(i, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); } -void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { +void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) { *a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); *b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); *c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); @@ -40,9 +40,11 @@ void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { *f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); *g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); *h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); + *i = __scoped_atomic_uinc_wrap(i, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); + *j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); } -void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { +void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) { 
*a = __scoped_atomic_fetch_add(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} *b = __scoped_atomic_fetch_sub(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} *c = __scoped_atomic_fetch_and(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} @@ -51,9 +53,11 @@ void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { *f = __scoped_atomic_fetch_nand(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} *g = __scoped_atomic_fetch_min(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} *h = __scoped_atomic_fetch_max(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} + *i = __scoped_atomic_uinc_wrap(1, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} + *j = __scoped_atomic_udec_wrap(1, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}} } -void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { +void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) { *a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} *b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} *c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED); // expected-error {{too few
arguments to function call, expected 4, have 3}} @@ -62,9 +66,11 @@ void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { *f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} *g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} *h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} + *i = __scoped_atomic_uinc_wrap(i, 1u, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} + *j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}} } -void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { +void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) { *a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} *b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} *c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} @@ -73,6 +79,17 @@ void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) { *f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} *g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} *h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} + *i = __scoped_atomic_uinc_wrap(i, 1u, 
__ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} + *j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}} +} + +void fi3e(float *a, float *b, float *c, float *d, float *e, float *f) { + *a = __scoped_atomic_fetch_and(a, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} + *b = __scoped_atomic_fetch_or(b, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} + *c = __scoped_atomic_fetch_xor(c, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} + *d = __scoped_atomic_fetch_nand(d, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} + *f = __scoped_atomic_uinc_wrap(f, 1u, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} + *e = __scoped_atomic_udec_wrap(e, 1u, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}} } int fi4a(int *i) { diff --git a/clang/test/SemaCXX/no-warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/no-warn-thread-safety-analysis.cpp new file mode 100644 index 0000000000000..5b1964301fce7 --- /dev/null +++ b/clang/test/SemaCXX/no-warn-thread-safety-analysis.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -Wno-thread-safety-negative -fcxx-exceptions -DUSE_CAPABILITY=0 %s +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -Wno-thread-safety-negative -fcxx-exceptions 
-DUSE_CAPABILITY=1 %s +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++17 -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -Wno-thread-safety-negative -fcxx-exceptions -DUSE_CAPABILITY=0 %s +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++17 -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -Wno-thread-safety-negative -fcxx-exceptions -DUSE_CAPABILITY=1 %s +// expected-no-diagnostics + +struct foo { + ~foo(); +}; +struct bar : foo {}; +struct baz : bar {}; +baz foobar(baz a) { return a; } diff --git a/clang/test/SemaCXX/return.cpp b/clang/test/SemaCXX/return.cpp index 796c9ae91dedc..92be66c24489e 100644 --- a/clang/test/SemaCXX/return.cpp +++ b/clang/test/SemaCXX/return.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -std=c++11 -fcxx-exceptions -fexceptions -fsyntax-only -Wignored-qualifiers -verify +// RUN: %clang_cc1 %s -std=c++14 -fcxx-exceptions -fexceptions -fsyntax-only -Wignored-qualifiers -verify int test1() { throw; @@ -132,3 +133,27 @@ void cxx_unresolved_expr() { // expr doesn't assert. 
return int(undeclared, 4; // expected-error {{use of undeclared identifier 'undeclared'}} } + +#if __cplusplus >= 201402L +namespace GH43054 { +struct S{}; +const auto foo() { return 0; } // expected-warning {{'const' type qualifier on return type has no effect}} +const auto bar() { return S{}; } +template +const auto baz() { return T{}; } + +void test() { + baz(); + baz(); + + []() -> const auto { // expected-warning {{'const' type qualifier on return type has no effect}} + return 0; + }(); + + []() -> const auto { + return S{}; + }(); +} +} + +#endif diff --git a/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip index 366278f648939..509906d8c87a8 100644 --- a/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip +++ b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip @@ -1,7 +1,6 @@ // REQUIRES: amdgpu-registered-target -// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx950 -verify=device %s -fcuda-is-device -// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify=host %s -// device-no-diagnostics +// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx950 -verify %s -fcuda-is-device +// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify %s #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) @@ -20,11 +19,11 @@ __device__ void i_am_device(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ v __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); - __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} 
- __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); @@ -46,11 +45,11 @@ __global__ void i_am_kernel(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ v __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); - __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot 
initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); @@ -58,3 +57,29 @@ __global__ void i_am_kernel(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ v __builtin_amdgcn_global_load_lds(src, dst, 12, 0 , 0); __builtin_amdgcn_global_load_lds(src, dst, 16, 0 , 0); } + +__device__ void i_am_wrong(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ void* dst, int vindex, int voffset, int soffset) { + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 1, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 2, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 4, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 12, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 16, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 1, vindex, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 2, vindex, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 4, vindex, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + 
__builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0, 4); // expected-error{{too many arguments to function call}} + + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0, 4); // expected-error{{too many arguments to function call}} + + __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_global_load_lds(src, dst, 4, 0 , 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_global_load_lds(src, dst, 12, 0 , 0, 4); // expected-error{{too many arguments to function call}} + __builtin_amdgcn_global_load_lds(src, dst, 16, 0 , 0, 4); // expected-error{{too many arguments to function call}} +} diff --git a/clang/test/SemaTemplate/attributes.cpp b/clang/test/SemaTemplate/attributes.cpp index 20fe983af28f7..d0ab0a68dec2a 100644 --- a/clang/test/SemaTemplate/attributes.cpp +++ b/clang/test/SemaTemplate/attributes.cpp @@ -640,3 +640,23 @@ namespace preferred_name { Foo<1, 2, int, float>::nosuch x; // expected-error {{no type named 'nosuch' in 'preferred_name::Bar'}} } ::preferred_name::Foo<1, 2, int, float>::nosuch x; // expected-error {{no type named 'nosuch' in 'preferred_name::Bar'}} + +// GH169072: templated 
attribute((constructor)) function crashes clang +// constructor/destructor attribute without priority argument should not crash. +namespace gh169072 { + template + [[gnu::constructor]] void foo() {} + + template void foo(); + + template + [[gnu::destructor]] void bar() {} + + template void bar(); + + // Also test with explicit priority argument + template + [[gnu::constructor(101)]] void baz() {} + + template void baz(); +} diff --git a/clang/tools/c-index-test/CMakeLists.txt b/clang/tools/c-index-test/CMakeLists.txt index 24e7c9692ca56..41e80e66ffa7a 100644 --- a/clang/tools/c-index-test/CMakeLists.txt +++ b/clang/tools/c-index-test/CMakeLists.txt @@ -27,6 +27,7 @@ else() libclang clangAST clangBasic + clangDriver clangFrontend clangIndex clangSerialization diff --git a/clang/tools/c-index-test/core_main.cpp b/clang/tools/c-index-test/core_main.cpp index 5a3086a7fc08f..c67479fd130ca 100644 --- a/clang/tools/c-index-test/core_main.cpp +++ b/clang/tools/c-index-test/core_main.cpp @@ -8,6 +8,7 @@ #include "clang/AST/Mangle.h" #include "clang/Basic/LangOptions.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" diff --git a/clang/tools/diagtool/CMakeLists.txt b/clang/tools/diagtool/CMakeLists.txt index b49619c075c73..09b2a81790f87 100644 --- a/clang/tools/diagtool/CMakeLists.txt +++ b/clang/tools/diagtool/CMakeLists.txt @@ -15,5 +15,6 @@ add_clang_tool(diagtool clang_target_link_libraries(diagtool PRIVATE clangBasic + clangDriver clangFrontend ) diff --git a/clang/tools/diagtool/ShowEnabledWarnings.cpp b/clang/tools/diagtool/ShowEnabledWarnings.cpp index bea0288c09358..5b25e656dafa4 100644 --- a/clang/tools/diagtool/ShowEnabledWarnings.cpp +++ b/clang/tools/diagtool/ShowEnabledWarnings.cpp @@ -9,6 +9,7 @@ #include "DiagTool.h" #include "DiagnosticNames.h" #include "clang/Basic/LLVM.h" +#include "clang/Driver/CreateInvocationFromArgs.h" 
#include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/TextDiagnosticBuffer.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/clang/tools/driver/cc1_main.cpp b/clang/tools/driver/cc1_main.cpp index 300d59df1bf7b..cc757039cafd0 100644 --- a/clang/tools/driver/cc1_main.cpp +++ b/clang/tools/driver/cc1_main.cpp @@ -17,6 +17,7 @@ #include "clang/Basic/TargetOptions.h" #include "clang/CodeGen/ObjectFilePCHContainerWriter.h" #include "clang/Config/config.h" +#include "clang/Driver/Driver.h" #include "clang/Driver/DriverDiagnostic.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" @@ -269,7 +270,7 @@ int cc1_main(ArrayRef Argv, const char *Argv0, void *MainAddr) { if (Clang->getHeaderSearchOpts().UseBuiltinIncludes && Clang->getHeaderSearchOpts().ResourceDir.empty()) Clang->getHeaderSearchOpts().ResourceDir = - CompilerInvocation::GetResourcesPath(Argv0, MainAddr); + GetResourcesPath(Argv0, MainAddr); /// Create the actual file system. 
Clang->createVirtualFileSystem(llvm::vfs::getRealFileSystem(), DiagsBuffer); diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp index f4d6fa72a1dfe..32e84248c1b27 100644 --- a/clang/tools/libclang/CIndex.cpp +++ b/clang/tools/libclang/CIndex.cpp @@ -38,6 +38,7 @@ #include "clang/Basic/Stack.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/Version.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Index/CommentToXML.h" @@ -4361,7 +4362,7 @@ clang_parseTranslationUnit_Impl(CXIndex CIdx, const char *source_filename, LibclangInvocationReporter InvocationReporter( *CXXIdx, LibclangInvocationReporter::OperationKind::ParseOperation, options, llvm::ArrayRef(*Args), /*InvocationArgs=*/{}, unsaved_files); - std::unique_ptr Unit = ASTUnit::LoadFromCommandLine( + std::unique_ptr Unit = CreateASTUnitFromCommandLine( Args->data(), Args->data() + Args->size(), CXXIdx->getPCHContainerOperations(), DiagOpts, Diags, CXXIdx->getClangResourcesPath(), CXXIdx->getStorePreamblesInMemory(), diff --git a/clang/tools/libclang/CIndexer.cpp b/clang/tools/libclang/CIndexer.cpp index 11d9312b64849..853a936b43e37 100644 --- a/clang/tools/libclang/CIndexer.cpp +++ b/clang/tools/libclang/CIndexer.cpp @@ -16,6 +16,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" #include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/FileSystem.h" @@ -137,7 +138,7 @@ const std::string &CIndexer::getClangResourcesPath() { #endif // Cache our result. 
- ResourcesPath = driver::Driver::GetResourcesPath(LibClangPath); + ResourcesPath = GetResourcesPath(LibClangPath); return ResourcesPath; } diff --git a/clang/tools/libclang/CMakeLists.txt b/clang/tools/libclang/CMakeLists.txt index e0ff7605b68b8..b0105f5a5f79f 100644 --- a/clang/tools/libclang/CMakeLists.txt +++ b/clang/tools/libclang/CMakeLists.txt @@ -65,6 +65,7 @@ set(LIBS clangFrontend clangIndex clangLex + clangOptions clangRewrite clangSema clangSerialization diff --git a/clang/tools/libclang/Indexing.cpp b/clang/tools/libclang/Indexing.cpp index c142f142d5071..75323d70afcfe 100644 --- a/clang/tools/libclang/Indexing.cpp +++ b/clang/tools/libclang/Indexing.cpp @@ -15,6 +15,7 @@ #include "CXString.h" #include "CXTranslationUnit.h" #include "clang/AST/ASTConsumer.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp index 66b3bba594fc9..a6308d115aa70 100644 --- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp @@ -17,6 +17,7 @@ #include "clang/Analysis/FlowSensitive/DataflowAnalysis.h" #include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h" #include "clang/Analysis/FlowSensitive/DataflowEnvironment.h" +#include "clang/Analysis/FlowSensitive/Formula.h" #include "clang/Analysis/FlowSensitive/NoopAnalysis.h" #include "clang/Analysis/FlowSensitive/NoopLattice.h" #include "clang/Analysis/FlowSensitive/RecordOps.h" @@ -4382,6 +4383,40 @@ TEST(TransferTest, VarDeclInitAssignConditionalOperator) { }); } +TEST(TransferTest, VarDeclInitReferenceAssignConditionalOperator) { + std::string Code = R"( + struct A { + int i; + }; + + void target(A Foo, A Bar, bool Cond) { + A &Baz = Cond ? Foo : Bar; + // Make sure A::i is modeled. 
+ Baz.i; + /*[[p]]*/ + } + )"; + runDataflow( + Code, + [](const llvm::StringMap> &Results, + ASTContext &ASTCtx) { + const Environment &Env = getEnvironmentAtAnnotation(Results, "p"); + + auto *FooIVal = cast(getFieldValue( + &getLocForDecl(ASTCtx, Env, "Foo"), "i", + ASTCtx, Env)); + auto *BarIVal = cast(getFieldValue( + &getLocForDecl(ASTCtx, Env, "Bar"), "i", + ASTCtx, Env)); + auto *BazIVal = cast(getFieldValue( + &getLocForDecl(ASTCtx, Env, "Baz"), "i", + ASTCtx, Env)); + + EXPECT_NE(BazIVal, FooIVal); + EXPECT_NE(BazIVal, BarIVal); + }); +} + TEST(TransferTest, VarDeclInDoWhile) { std::string Code = R"( void target(int *Foo) { @@ -6150,6 +6185,45 @@ TEST(TransferTest, ConditionalOperatorValue) { }); } +TEST(TransferTest, ConditionalOperatorValuesTested) { + // We should be able to show that the result of the conditional operator, + // JoinResultMustBeB1, must be equal to B1, because the condition is checking + // `B1 == B2` and selecting B1 on the false branch, or B2 on the true branch. + // Similarly, for JoinResultMustBeB2 == B2. + // Note that the conditional operator involves a join of two *different* + // glvalues, before casting the lvalue to an rvalue, which may affect the + // implementation of the transfer function, and thus affect whether or not we + // can prove that IsB1 == B1. + std::string Code = R"( + void target(bool B1, bool B2) { + bool JoinResultMustBeB1 = (B1 == B2) ? B2 : B1; + bool JoinResultMustBeB2 = (B1 == B2) ? 
B1 : B2; + // [[p]] + } + )"; + runDataflow( + Code, + [](const llvm::StringMap> &Results, + ASTContext &ASTCtx) { + Environment Env = getEnvironmentAtAnnotation(Results, "p").fork(); + + auto &B1 = getValueForDecl(ASTCtx, Env, "B1"); + auto &B2 = getValueForDecl(ASTCtx, Env, "B2"); + auto &JoinResultMustBeB1 = + getValueForDecl(ASTCtx, Env, "JoinResultMustBeB1"); + auto &JoinResultMustBeB2 = + getValueForDecl(ASTCtx, Env, "JoinResultMustBeB2"); + + const Formula &MustBeB1_Eq_B1 = + Env.arena().makeEquals(JoinResultMustBeB1.formula(), B1.formula()); + EXPECT_TRUE(Env.proves(MustBeB1_Eq_B1)); + + const Formula &MustBeB2_Eq_B2 = + Env.arena().makeEquals(JoinResultMustBeB2.formula(), B2.formula()); + EXPECT_TRUE(Env.proves(MustBeB2_Eq_B2)); + }); +} + TEST(TransferTest, ConditionalOperatorLocation) { std::string Code = R"( void target(bool Cond, int I1, int I2) { @@ -6177,6 +6251,66 @@ TEST(TransferTest, ConditionalOperatorLocation) { }); } +TEST(TransferTest, ConditionalOperatorLocationUpdatedAfter) { + // We don't currently model a Conditional Operator with an LValue result + // as having aliases to the LHS and RHS (if it isn't just the same LValue + // on both sides). We also don't model that the update "may" happen + // (a weak update). So, we don't consider the LHS and RHS as being weakly + // updated at [[after_diff]]. + std::string Code = R"( + void target(bool Cond, bool B1, bool B2) { + (void)0; + // [[before_same]] + (Cond ? B1 : B1) = !B1; + // [[after_same]] + (Cond ? 
B1 : B2) = !B1; + // [[after_diff]] + } + )"; + runDataflow( + Code, + [](const llvm::StringMap> &Results, + ASTContext &ASTCtx) { + Environment BeforeSameEnv = + getEnvironmentAtAnnotation(Results, "before_same").fork(); + Environment AfterSameEnv = + getEnvironmentAtAnnotation(Results, "after_same").fork(); + Environment AfterDiffEnv = + getEnvironmentAtAnnotation(Results, "after_diff").fork(); + + auto &BeforeSameB1 = + getValueForDecl(ASTCtx, BeforeSameEnv, "B1"); + auto &AfterSameB1 = + getValueForDecl(ASTCtx, AfterSameEnv, "B1"); + auto &AfterDiffB1 = + getValueForDecl(ASTCtx, AfterDiffEnv, "B1"); + + EXPECT_NE(&BeforeSameB1, &AfterSameB1); + EXPECT_NE(&BeforeSameB1, &AfterDiffB1); + // FIXME: The formula for AfterSameB1 should be different from + // AfterDiffB1 to reflect that B1 may be updated. + EXPECT_EQ(&AfterSameB1, &AfterDiffB1); + + // The value of B1 is definitely different from before_same vs + // after_same. + const Formula &B1ChangedForSame = + AfterSameEnv.arena().makeNot(AfterSameEnv.arena().makeEquals( + AfterSameB1.formula(), BeforeSameB1.formula())); + EXPECT_TRUE(AfterSameEnv.allows(B1ChangedForSame)); + EXPECT_TRUE(AfterSameEnv.proves(B1ChangedForSame)); + + const Formula &B1ChangedForDiff = + AfterDiffEnv.arena().makeNot(AfterDiffEnv.arena().makeEquals( + AfterDiffB1.formula(), AfterSameB1.formula())); + // FIXME: It should be possible that B1 *may* be updated, so it may be + // that AfterSameB1 != AfterDiffB1 or AfterSameB1 == AfterDiffB1. + EXPECT_FALSE(AfterSameEnv.allows(B1ChangedForDiff)); + // proves() should be false, since B1 may or may not have changed + // depending on `Cond`. + EXPECT_FALSE(AfterSameEnv.proves(B1ChangedForDiff)); + }); +} + TEST(TransferTest, ConditionalOperatorOnConstantExpr) { // This is a regression test: We used to crash when a `ConstantExpr` was used // in the branches of a conditional operator. 
diff --git a/clang/unittests/Driver/DXCModeTest.cpp b/clang/unittests/Driver/DXCModeTest.cpp index e0454f190b35a..130da620b40b5 100644 --- a/clang/unittests/Driver/DXCModeTest.cpp +++ b/clang/unittests/Driver/DXCModeTest.cpp @@ -15,6 +15,7 @@ #include "clang/Basic/LLVM.h" #include "clang/Basic/TargetOptions.h" #include "clang/Driver/Compilation.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Driver/Driver.h" #include "clang/Driver/ToolChain.h" #include "clang/Frontend/CompilerInstance.h" diff --git a/clang/unittests/Driver/ToolChainTest.cpp b/clang/unittests/Driver/ToolChainTest.cpp index afa17ff219be2..8f533790ec501 100644 --- a/clang/unittests/Driver/ToolChainTest.cpp +++ b/clang/unittests/Driver/ToolChainTest.cpp @@ -17,6 +17,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/Basic/TargetOptions.h" #include "clang/Driver/Compilation.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Driver/Driver.h" #include "clang/Frontend/CompilerInstance.h" #include "llvm/ADT/ArrayRef.h" diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp index d578fa7a1a1e8..fec1c48c448d2 100644 --- a/clang/unittests/Format/ConfigParseTest.cpp +++ b/clang/unittests/Format/ConfigParseTest.cpp @@ -1164,6 +1164,36 @@ TEST(ConfigParseTest, ParsesConfiguration) { FormatStyle::BLS_Block); CHECK_PARSE("Cpp11BracedListStyle: true", Cpp11BracedListStyle, FormatStyle::BLS_AlignFirstComment); + + constexpr FormatStyle::IntegerLiteralSeparatorStyle + ExpectedIntegerLiteralSeparatorStyle{/*Binary=*/2, + /*BinaryMinDigitInsert=*/5, + /*BinaryMaxDigitRemove=*/2, + /*Decimal=*/6, + /*DecimalMinDigitInsert=*/6, + /*DecimalMaxDigitRemove=*/3, + /*Hex=*/4, + /*HexMinDigitInsert=*/2, + /*HexMaxDigitRemove=*/1}; + CHECK_PARSE("IntegerLiteralSeparator:\n" + " Binary: 2\n" + " BinaryMinDigitsInsert: 5\n" + " BinaryMaxDigitsRemove: 2\n" + " Decimal: 6\n" + " DecimalMinDigitsInsert: 6\n" + " DecimalMaxDigitsRemove: 3\n" 
+ " Hex: 4\n" + " HexMinDigitsInsert: 2\n" + " HexMaxDigitsRemove: 1", + IntegerLiteralSeparator, ExpectedIntegerLiteralSeparatorStyle); + + // Backward compatibility: + CHECK_PARSE_NESTED_VALUE("BinaryMinDigits: 6", IntegerLiteralSeparator, + BinaryMinDigitsInsert, 6); + CHECK_PARSE_NESTED_VALUE("DecimalMinDigits: 5", IntegerLiteralSeparator, + DecimalMinDigitsInsert, 5); + CHECK_PARSE_NESTED_VALUE("HexMinDigits: 5", IntegerLiteralSeparator, + HexMinDigitsInsert, 5); } TEST(ConfigParseTest, ParsesConfigurationWithLanguages) { diff --git a/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp b/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp index 53b6dd8efadff..21cdab2187d90 100644 --- a/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp +++ b/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp @@ -137,34 +137,34 @@ TEST_F(IntegerLiteralSeparatorTest, UnderscoreAsSeparator) { verifyFormat("o = 0o400000000000000003n;", Style); } -TEST_F(IntegerLiteralSeparatorTest, MinDigits) { +TEST_F(IntegerLiteralSeparatorTest, MinDigitsInsert) { FormatStyle Style = getLLVMStyle(); Style.IntegerLiteralSeparator.Binary = 3; Style.IntegerLiteralSeparator.Decimal = 3; Style.IntegerLiteralSeparator.Hex = 2; - Style.IntegerLiteralSeparator.BinaryMinDigits = 7; + Style.IntegerLiteralSeparator.BinaryMinDigitsInsert = 7; verifyFormat("b1 = 0b101101;\n" "b2 = 0b1'101'101;", "b1 = 0b101'101;\n" "b2 = 0b1101101;", Style); - Style.IntegerLiteralSeparator.DecimalMinDigits = 5; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 5; verifyFormat("d1 = 2023;\n" "d2 = 10'000;", "d1 = 2'023;\n" "d2 = 100'00;", Style); - Style.IntegerLiteralSeparator.DecimalMinDigits = 3; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 3; verifyFormat("d1 = 123;\n" "d2 = 1'234;", "d1 = 12'3;\n" "d2 = 12'34;", Style); - Style.IntegerLiteralSeparator.HexMinDigits = 6; + Style.IntegerLiteralSeparator.HexMinDigitsInsert = 6; verifyFormat("h1 = 0xABCDE;\n" "h2 = 0xAB'CD'EF;", "h1 = 
0xA'BC'DE;\n" @@ -243,6 +243,23 @@ TEST_F(IntegerLiteralSeparatorTest, FloatingPoint) { Style); } +TEST_F(IntegerLiteralSeparatorTest, MaxDigitsRemove) { + auto Style = getLLVMStyle(); + Style.IntegerLiteralSeparator.Decimal = 3; + Style.IntegerLiteralSeparator.DecimalMaxDigitsRemove = 4; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 7; + + verifyFormat("d1 = 123456;\n" + "d2 = 1234'56;", + Style); + + verifyFormat("d0 = 2023;\n" + "d3 = 5'000'000;", + "d0 = 20'2'3;\n" + "d3 = 5000000;", + Style); +} + } // namespace } // namespace test } // namespace format diff --git a/clang/unittests/Frontend/ASTUnitTest.cpp b/clang/unittests/Frontend/ASTUnitTest.cpp index dfdbe90e72f1f..bf9e4e184b5db 100644 --- a/clang/unittests/Frontend/ASTUnitTest.cpp +++ b/clang/unittests/Frontend/ASTUnitTest.cpp @@ -9,6 +9,8 @@ #include #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" @@ -173,7 +175,7 @@ TEST_F(ASTUnitTest, LoadFromCommandLineEarlyError) { auto PCHContainerOps = std::make_shared(); std::unique_ptr ErrUnit; - std::unique_ptr AST = ASTUnit::LoadFromCommandLine( + std::unique_ptr AST = CreateASTUnitFromCommandLine( &Args[0], &Args[4], PCHContainerOps, DiagOpts, Diags, "", false, "", false, CaptureDiagsKind::All, {}, true, 0, TU_Complete, false, false, false, SkipFunctionBodiesScope::None, false, true, false, false, @@ -201,7 +203,7 @@ TEST_F(ASTUnitTest, LoadFromCommandLineWorkingDirectory) { auto PCHContainerOps = std::make_shared(); std::unique_ptr ErrUnit; - std::unique_ptr AST = ASTUnit::LoadFromCommandLine( + std::unique_ptr AST = CreateASTUnitFromCommandLine( &Args[0], &Args[4], PCHContainerOps, DiagOpts, Diags, "", false, "", false, CaptureDiagsKind::All, {}, true, 0, TU_Complete, false, false, false, SkipFunctionBodiesScope::None, false, true, 
false, false, diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp index cd3fefa1ea994..39d35b48f394a 100644 --- a/clang/unittests/Frontend/CompilerInstanceTest.cpp +++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp @@ -8,6 +8,7 @@ #include "clang/Frontend/CompilerInstance.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/clang/unittests/Frontend/UtilsTest.cpp b/clang/unittests/Frontend/UtilsTest.cpp index fc411e4af705f..a82733d57714a 100644 --- a/clang/unittests/Frontend/UtilsTest.cpp +++ b/clang/unittests/Frontend/UtilsTest.cpp @@ -9,6 +9,7 @@ #include "clang/Frontend/Utils.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/TargetOptions.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Lex/PreprocessorOptions.h" diff --git a/clang/unittests/Sema/CMakeLists.txt b/clang/unittests/Sema/CMakeLists.txt index b61ed8c457635..188f6135a60ac 100644 --- a/clang/unittests/Sema/CMakeLists.txt +++ b/clang/unittests/Sema/CMakeLists.txt @@ -13,6 +13,7 @@ add_distinct_clang_unittest(SemaTests clangAST clangASTMatchers clangBasic + clangDriver clangFrontend clangParse clangSema diff --git a/clang/unittests/Sema/SemaNoloadLookupTest.cpp b/clang/unittests/Sema/SemaNoloadLookupTest.cpp index e565372698e5e..3944269eff502 100644 --- a/clang/unittests/Sema/SemaNoloadLookupTest.cpp +++ b/clang/unittests/Sema/SemaNoloadLookupTest.cpp @@ -10,6 +10,7 @@ #include "clang/AST/DeclarationName.h" #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include 
"clang/Frontend/FrontendAction.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp index edf33ae04230b..b76dcfec96063 100644 --- a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp +++ b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp index d7b55491fddac..f55925aeae1f2 100644 --- a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp +++ b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/ModuleCacheTest.cpp b/clang/unittests/Serialization/ModuleCacheTest.cpp index e9b8da3dba6af..df26e54588b9e 100644 --- a/clang/unittests/Serialization/ModuleCacheTest.cpp +++ b/clang/unittests/Serialization/ModuleCacheTest.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/NoCommentsTest.cpp b/clang/unittests/Serialization/NoCommentsTest.cpp index 
01bb6999a7c90..444a082bba907 100644 --- a/clang/unittests/Serialization/NoCommentsTest.cpp +++ b/clang/unittests/Serialization/NoCommentsTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp index 55ee72875ead2..b826f20ce4d70 100644 --- a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp +++ b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp index 743f851fc5fe1..2be01def49809 100644 --- a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp +++ b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Tooling/SourceCodeTest.cpp b/clang/unittests/Tooling/SourceCodeTest.cpp index 549b77752f1c2..a998954a6e9ea 100644 --- a/clang/unittests/Tooling/SourceCodeTest.cpp +++ b/clang/unittests/Tooling/SourceCodeTest.cpp @@ -510,10 +510,14 @@ TEST(SourceCodeTest, 
EditInvolvingExpansionIgnoringExpansionShouldFail) { #define M1(x) x(1) #define M2(x, y) x ## y #define M3(x) foobar(x) +#define M4(x, y) x y +#define M5(x) x int foobar(int); int a = M1(foobar); int b = M2(foo, bar(2)); int c = M3(3); +int d = M4(foobar, (4)); +int e = M5(foobar) (5); )cpp"); CallsVisitor Visitor; diff --git a/clang/unittests/Tooling/Syntax/TokensTest.cpp b/clang/unittests/Tooling/Syntax/TokensTest.cpp index 47184cbf5d768..468ca5ddd2c75 100644 --- a/clang/unittests/Tooling/Syntax/TokensTest.cpp +++ b/clang/unittests/Tooling/Syntax/TokensTest.cpp @@ -20,6 +20,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TokenKinds.def" #include "clang/Basic/TokenKinds.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/Utils.h" diff --git a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp index b2be64fc08f3d..dad75854240ef 100644 --- a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp +++ b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp @@ -13,6 +13,7 @@ #include "TreeTestBase.h" #include "clang/AST/ASTConsumer.h" #include "clang/Basic/LLVM.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendAction.h" diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake index ca45d7bd2af7f..c10367715396e 100644 --- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake +++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake @@ -102,7 +102,7 @@ endif() set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS64} ${HEXAGON} ${LOONGARCH64}) set(ALL_SCUDO_STANDALONE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} - ${MIPS32} ${MIPS64} ${PPC64} ${HEXAGON} ${LOONGARCH64} ${RISCV64}) + ${MIPS32} 
${MIPS64} ${PPC64} ${HEXAGON} ${LOONGARCH64} ${RISCV64} ${S390X}) if(APPLE) set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM64}) else() diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt index aa2a2519afc02..7e8621855eb84 100644 --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -1011,9 +1011,10 @@ else () list(APPEND BUILTIN_CFLAGS_${arch} -fomit-frame-pointer -DCOMPILER_RT_ARMHF_TARGET) endif() - # For RISCV32, we must force enable int128 for compiling long + # For RISCV32 and 32-bit SPARC, we must force enable int128 for compiling long # double routines. - if(COMPILER_RT_ENABLE_SOFTWARE_INT128 OR "${arch}" STREQUAL "riscv32") + if (COMPILER_RT_ENABLE_SOFTWARE_INT128 OR ("${arch}" MATCHES "riscv32|sparc$" + AND NOT CMAKE_COMPILER_IS_GNUCC)) list(APPEND BUILTIN_CFLAGS_${arch} -fforce-enable-int128) endif() diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c index b4b60986022d4..8b352cfe568d0 100644 --- a/compiler-rt/lib/builtins/cpu_model/x86.c +++ b/compiler-rt/lib/builtins/cpu_model/x86.c @@ -135,13 +135,9 @@ enum ProcessorFeatures { FEATURE_AVX512BW, FEATURE_AVX512DQ, FEATURE_AVX512CD, - FEATURE_AVX512ER, - FEATURE_AVX512PF, - FEATURE_AVX512VBMI, + FEATURE_AVX512VBMI = 26, FEATURE_AVX512IFMA, - FEATURE_AVX5124VNNIW, - FEATURE_AVX5124FMAPS, - FEATURE_AVX512VPOPCNTDQ, + FEATURE_AVX512VPOPCNTDQ = 30, FEATURE_AVX512VBMI2, FEATURE_GFNI, FEATURE_VPCLMULQDQ, @@ -181,8 +177,7 @@ enum ProcessorFeatures { // FEATURE_OSXSAVE, FEATURE_PCONFIG = 63, FEATURE_PKU, - FEATURE_PREFETCHWT1, - FEATURE_PRFCHW, + FEATURE_PRFCHW = 66, FEATURE_PTWRITE, FEATURE_RDPID, FEATURE_RDRND, @@ -231,7 +226,11 @@ enum ProcessorFeatures { FEATURE_USERMSR, FEATURE_AVX10_1 = 114, FEATURE_AVX10_2 = 116, + FEATURE_AMX_AVX512, + FEATURE_AMX_TF32, + FEATURE_AMX_FP8 = 120, FEATURE_MOVRS, + FEATURE_AMX_MOVRS, CPU_FEATURE_MAX }; @@ -959,12 +958,10 @@ static void 
getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, setFeature(FEATURE_ADX); if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save) setFeature(FEATURE_AVX512IFMA); + if (HasLeaf7 && ((EBX >> 23) & 1)) + setFeature(FEATURE_CLFLUSHOPT); if (HasLeaf7 && ((EBX >> 24) & 1)) setFeature(FEATURE_CLWB); - if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save) - setFeature(FEATURE_AVX512PF); - if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save) - setFeature(FEATURE_AVX512ER); if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save) setFeature(FEATURE_AVX512CD); if (HasLeaf7 && ((EBX >> 29) & 1)) @@ -974,8 +971,6 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save) setFeature(FEATURE_AVX512VL); - if (HasLeaf7 && ((ECX >> 0) & 1)) - setFeature(FEATURE_PREFETCHWT1); if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save) setFeature(FEATURE_AVX512VBMI); if (HasLeaf7 && ((ECX >> 4) & 1)) @@ -1011,10 +1006,6 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, if (HasLeaf7 && ((ECX >> 29) & 1)) setFeature(FEATURE_ENQCMD); - if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save) - setFeature(FEATURE_AVX5124VNNIW); - if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) - setFeature(FEATURE_AVX5124FMAPS); if (HasLeaf7 && ((EDX >> 5) & 1)) setFeature(FEATURE_UINTR); if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save) @@ -1088,6 +1079,17 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, if (HasLeafD && ((EAX >> 3) & 1) && HasAVXSave) setFeature(FEATURE_XSAVES); + bool HasLeaf1E = MaxLevel >= 0x1e && + !getX86CpuIDAndInfoEx(0x1e, 0x1, &EAX, &EBX, &ECX, &EDX); + if (HasLeaf1E && (EAX & 0x10)) + setFeature(FEATURE_AMX_FP8); + if (HasLeaf1E && (EAX & 0x40)) + setFeature(FEATURE_AMX_TF32); + if (HasLeaf1E && (EAX & 0x80)) + setFeature(FEATURE_AMX_AVX512); + if (HasLeaf1E && (EAX & 0x100)) + setFeature(FEATURE_AMX_MOVRS); + bool HasLeaf24 = MaxLevel >= 0x24 && 
!getX86CpuIDAndInfo(0x24, &EAX, &EBX, &ECX, &EDX); if (HasLeaf7Subleaf1 && ((EDX >> 19) & 1) && HasLeaf24) { diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h index 8b570a770b503..404e984e1f5e9 100644 --- a/compiler-rt/lib/scudo/standalone/tsd_shared.h +++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h @@ -93,6 +93,7 @@ struct TSDRegistrySharedT { void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); + MutexTSDs.lock(); for (u32 I = 0; I < TSDsArraySize; I++) TSDs[I].lock(); } @@ -100,6 +101,7 @@ struct TSDRegistrySharedT { void enable() NO_THREAD_SAFETY_ANALYSIS { for (s32 I = static_cast(TSDsArraySize - 1); I >= 0; I--) TSDs[I].unlock(); + MutexTSDs.unlock(); Mutex.unlock(); } diff --git a/compiler-rt/lib/tysan/CMakeLists.txt b/compiler-rt/lib/tysan/CMakeLists.txt index 7d13ae3963919..e7a6e71b86a0c 100644 --- a/compiler-rt/lib/tysan/CMakeLists.txt +++ b/compiler-rt/lib/tysan/CMakeLists.txt @@ -46,8 +46,10 @@ if(APPLE) OBJECT_LIBS RTTysan_dynamic RTInterception RTSanitizerCommon + RTSanitizerCommonCoverage RTSanitizerCommonLibc RTSanitizerCommonSymbolizer + RTUbsan CFLAGS ${TYSAN_DYNAMIC_CFLAGS} LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS} DEFS ${TYSAN_DYNAMIC_DEFINITIONS} @@ -71,8 +73,10 @@ else() SOURCES ${TYSAN_SOURCES} OBJECT_LIBS RTInterception RTSanitizerCommon + RTSanitizerCommonCoverage RTSanitizerCommonLibc RTSanitizerCommonSymbolizer + RTUbsan CFLAGS ${TYSAN_CFLAGS} PARENT_TARGET tysan) endforeach() diff --git a/compiler-rt/test/builtins/CMakeLists.txt b/compiler-rt/test/builtins/CMakeLists.txt index 8e3cb35183ba7..36135c7905900 100644 --- a/compiler-rt/test/builtins/CMakeLists.txt +++ b/compiler-rt/test/builtins/CMakeLists.txt @@ -48,7 +48,8 @@ foreach(arch ${BUILTIN_TEST_ARCH}) string(REPLACE ";" " " BUILTINS_TEST_TARGET_CFLAGS "${BUILTINS_TEST_TARGET_CFLAGS}") endif() - if (COMPILER_RT_ENABLE_SOFTWARE_INT128 OR ${arch} STREQUAL "riscv32") + if (COMPILER_RT_ENABLE_SOFTWARE_INT128 OR ("${arch}" 
MATCHES "riscv32|sparc$" + AND NOT CMAKE_COMPILER_IS_GNUCC)) list(APPEND BUILTINS_TEST_TARGET_CFLAGS -fforce-enable-int128) string(REPLACE ";" " " BUILTINS_TEST_TARGET_CFLAGS "${BUILTINS_TEST_TARGET_CFLAGS}") endif() diff --git a/compiler-rt/test/sanitizer_common/TestCases/printf-ldbl.c b/compiler-rt/test/sanitizer_common/TestCases/printf-ldbl.c index cfe8d800d3834..f6629ab81c3b3 100644 --- a/compiler-rt/test/sanitizer_common/TestCases/printf-ldbl.c +++ b/compiler-rt/test/sanitizer_common/TestCases/printf-ldbl.c @@ -1,8 +1,5 @@ // RUN: %clang %s -o %t && %run %t 2>&1 -// Issue #41838 -// XFAIL: sparc-target-arch && target={{.*solaris.*}} - #include #include #include diff --git a/compiler-rt/test/sanitizer_common/TestCases/scanf-ldbl.c b/compiler-rt/test/sanitizer_common/TestCases/scanf-ldbl.c index a38f34a245fae..9ca30f4a65688 100644 --- a/compiler-rt/test/sanitizer_common/TestCases/scanf-ldbl.c +++ b/compiler-rt/test/sanitizer_common/TestCases/scanf-ldbl.c @@ -1,8 +1,5 @@ // RUN: %clang %s -o %t && %run %t 2>&1 -// Issue #41838 -// XFAIL: sparc-target-arch && target={{.*solaris.*}} - #include #include #include diff --git a/compiler-rt/test/ubsan/CMakeLists.txt b/compiler-rt/test/ubsan/CMakeLists.txt index 410585e6a07ef..f0b84f431472a 100644 --- a/compiler-rt/test/ubsan/CMakeLists.txt +++ b/compiler-rt/test/ubsan/CMakeLists.txt @@ -62,6 +62,9 @@ foreach(arch ${UBSAN_TEST_ARCH}) if(COMPILER_RT_HAS_TSAN AND ";${TSAN_SUPPORTED_ARCH};" MATCHES ";${arch};" AND NOT ANDROID) add_ubsan_testsuites("ThreadSanitizer" tsan ${arch}) endif() + if(COMPILER_RT_HAS_TYSAN AND ";${TYSAN_SUPPORTED_ARCH};" MATCHES ";${arch};") + add_ubsan_testsuites("TypeSanitizer" tysan ${arch}) + endif() endforeach() macro(add_ubsan_device_testsuite test_mode sanitizer platform arch) @@ -124,6 +127,10 @@ if(APPLE) if(COMPILER_RT_HAS_TSAN AND ";${TSAN_SUPPORTED_ARCH};" MATCHES ";${arch};") add_ubsan_device_testsuite("ThreadSanitizer" tsan ${platform} ${arch}) endif() + + if(COMPILER_RT_HAS_TYSAN AND 
";${TYSAN_SUPPORTED_ARCH};" MATCHES ";${arch};") + add_ubsan_device_testsuite("TypeSanitizer" tysan ${platform} ${arch}) + endif() endforeach() endforeach() endif() diff --git a/compiler-rt/test/ubsan/TestCases/Float/cast-overflow.cpp b/compiler-rt/test/ubsan/TestCases/Float/cast-overflow.cpp index 8638bf69f749e..80063b7a0f9f9 100644 --- a/compiler-rt/test/ubsan/TestCases/Float/cast-overflow.cpp +++ b/compiler-rt/test/ubsan/TestCases/Float/cast-overflow.cpp @@ -9,9 +9,6 @@ // RUN: %run %t 6 2>&1 | FileCheck %s --check-prefix=CHECK-6 // RUN: %run %t 7 2>&1 | FileCheck %s --check-prefix=CHECK-7 -// Issue #41838 -// XFAIL: sparc-target-arch && target={{.*solaris.*}} - // This test assumes float and double are IEEE-754 single- and double-precision. #if defined(__APPLE__) diff --git a/compiler-rt/test/ubsan/TestCases/Misc/Posix/print_stack_trace.cpp b/compiler-rt/test/ubsan/TestCases/Misc/Posix/print_stack_trace.cpp index 2eac710d98085..c5c8eb7853458 100644 --- a/compiler-rt/test/ubsan/TestCases/Misc/Posix/print_stack_trace.cpp +++ b/compiler-rt/test/ubsan/TestCases/Misc/Posix/print_stack_trace.cpp @@ -4,6 +4,9 @@ // This test is temporarily disabled due to broken unwinding on ARM. // UNSUPPORTED: target={{.*-linux-.*}} +// Temporarily unsupporting on TySan until interfaces are implemented +// UNSUPPORTED: ubsan-tysan + // The test doesn't pass on Darwin in UBSan-TSan configuration, because TSan is // using the slow unwinder which is not supported on Darwin. The test should // be universal after landing of https://reviews.llvm.org/D32806. diff --git a/compiler-rt/test/ubsan/TestCases/Misc/Posix/sigaction.cpp b/compiler-rt/test/ubsan/TestCases/Misc/Posix/sigaction.cpp index 0ab65bd30d92c..0b848ec8ac471 100644 --- a/compiler-rt/test/ubsan/TestCases/Misc/Posix/sigaction.cpp +++ b/compiler-rt/test/ubsan/TestCases/Misc/Posix/sigaction.cpp @@ -7,6 +7,9 @@ // Reason unknown, needs debugging. 
// UNSUPPORTED: target=aarch64{{.*}} && ubsan-tsan +// TySan doesn't build a shared library +// UNSUPPORTED: ubsan-tysan + #include #include #include diff --git a/compiler-rt/test/ubsan/TestCases/Misc/coverage-levels.cpp b/compiler-rt/test/ubsan/TestCases/Misc/coverage-levels.cpp index c6133178262cc..fd019d2242552 100644 --- a/compiler-rt/test/ubsan/TestCases/Misc/coverage-levels.cpp +++ b/compiler-rt/test/ubsan/TestCases/Misc/coverage-levels.cpp @@ -19,8 +19,9 @@ // RUN: %clangxx -fsanitize=shift -O1 -fsanitize-coverage=edge,trace-pc-guard %s -o %t // RUN: %env_ubsan_opts=coverage=1:verbosity=1:coverage_dir='"%t-dir"' %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3 --check-prefix=CHECK_WARN -// Coverage is not yet implemented in TSan. +// Coverage is not yet implemented in TSan or TySan. // XFAIL: ubsan-tsan +// XFAIL: ubsan-tysan // UNSUPPORTED: ubsan-standalone-static // No coverage support // UNSUPPORTED: target={{.*openbsd.*}} diff --git a/compiler-rt/test/ubsan/TestCases/Misc/log-path_test.cpp b/compiler-rt/test/ubsan/TestCases/Misc/log-path_test.cpp index 4773884cb4cc0..3fd02957a6903 100644 --- a/compiler-rt/test/ubsan/TestCases/Misc/log-path_test.cpp +++ b/compiler-rt/test/ubsan/TestCases/Misc/log-path_test.cpp @@ -24,9 +24,6 @@ // FIXME: log_path is not supported on Windows yet. 
// XFAIL: target={{.*windows-msvc.*}} -// Issue #41838 -// XFAIL: sparc-target-arch && target={{.*solaris.*}} - #include #include int main(int argc, char *argv[]) { diff --git a/compiler-rt/test/ubsan/lit.common.cfg.py b/compiler-rt/test/ubsan/lit.common.cfg.py index 25e527903788e..314d207f94ad5 100644 --- a/compiler-rt/test/ubsan/lit.common.cfg.py +++ b/compiler-rt/test/ubsan/lit.common.cfg.py @@ -39,6 +39,9 @@ def get_required_attr(config, attr_name): elif ubsan_lit_test_mode == "ThreadSanitizer": config.available_features.add("ubsan-tsan") clang_ubsan_cflags = ["-fsanitize=thread"] +elif ubsan_lit_test_mode == "TypeSanitizer": + config.available_features.add("ubsan-tysan") + clang_ubsan_cflags = ["-fsanitize=type"] else: lit_config.fatal("Unknown UBSan test mode: %r" % ubsan_lit_test_mode) diff --git a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c index aaed134b3ae81..8c04a0091cb11 100644 --- a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c +++ b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c @@ -1,6 +1,7 @@ -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fsanitize-handler-preserve-all-regs -DPRESERVE %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=PRESERVE +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | 
FileCheck %s +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL #include #include @@ -9,8 +10,21 @@ static int Result; void __ubsan_report_error(const char *kind, uintptr_t caller) { +// -fsanitize-handler-preserve-all-regs is ignored on other architectures. +// Pretend we called the other handler on those. +#if defined(PRESERVE) && !defined(__aarch64__) && !defined(__x86_64__) + fprintf(stderr, "CUSTOM_CALLBACK_PRESERVE: %s\n", kind); +#else fprintf(stderr, "CUSTOM_CALLBACK: %s\n", kind); +#endif +} + +#if defined(__aarch64__) || defined(__x86_64__) +[[clang::preserve_all]] void __ubsan_report_error_preserve(const char *kind, + uintptr_t caller) { + fprintf(stderr, "CUSTOM_CALLBACK_PRESERVE: %s\n", kind); } +#endif #if OVERRIDE void __ubsan_report_error_fatal(const char *kind, uintptr_t caller) { @@ -21,5 +35,6 @@ void __ubsan_report_error_fatal(const char *kind, uintptr_t caller) { int main(int argc, const char **argv) { int32_t t0 = (~((uint32_t)0)); // CHECK: CUSTOM_CALLBACK: implicit-conversion + // PRESERVE: CUSTOM_CALLBACK_PRESERVE: implicit-conversion // FATAL: FATAL_CALLBACK: implicit-conversion } diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py index 792e0be629fc4..68ca50a5e81db 100644 --- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py +++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py @@ -763,20 +763,27 @@ def launch(self, cmdline): launch_request = self._get_launch_params(cmdline) - # For some reason, we *must* submit in the order launch->configurationDone, and then we will receive responses - # in the order configurationDone->launch. - self._flush_breakpoints() + # Per DAP protocol, the correct sequence is: + # 1. Send launch request + # 2. Wait for launch response and "initialized" event + # 3.
Set breakpoints + # 4. Send configurationDone to start the process launch_req_id = self.send_message(self.make_request("launch", launch_request)) - config_done_req_id = self.send_message(self.make_request("configurationDone")) - config_done_response = self._await_response(config_done_req_id) - assert config_done_response["success"], "Should simply receive an affirmative?" launch_response = self._await_response(launch_req_id) if not launch_response["success"]: raise DebuggerException( f"failure launching debugger: \"{launch_response['body']['error']['format']}\"" ) - # We can't interact meaningfully with the process until we have the thread ID and confirmation that the process - # has finished launching. + + # Set breakpoints after receiving launch response but before configurationDone. + self._flush_breakpoints() + + # Send configurationDone to allow the process to start running. + config_done_req_id = self.send_message(self.make_request("configurationDone")) + config_done_response = self._await_response(config_done_req_id) + assert config_done_response["success"] + + # Wait for the process to launch and obtain a thread ID. 
while self._debugger_state.thread is None or not self._debugger_state.launched: time.sleep(0.001) diff --git a/flang-rt/cmake/modules/HandleLibs.cmake b/flang-rt/cmake/modules/HandleLibs.cmake index a193045fc0bfa..9987d6f668978 100644 --- a/flang-rt/cmake/modules/HandleLibs.cmake +++ b/flang-rt/cmake/modules/HandleLibs.cmake @@ -45,8 +45,6 @@ elseif (FLANG_RT_LIBCXX_PROVIDER STREQUAL "llvm") endif () if (FLANG_RT_HAS_STDLIB_FLAG) - target_compile_options(flang-rt-libc-headers INTERFACE - $<$:$> - ) + target_compile_options(flang-rt-libc-headers INTERFACE $<$:-stdlib=libc++>) endif () endif () diff --git a/flang-rt/lib/cuda/allocator.cpp b/flang-rt/lib/cuda/allocator.cpp index 5436051002265..dc3ce0ee1b590 100644 --- a/flang-rt/lib/cuda/allocator.cpp +++ b/flang-rt/lib/cuda/allocator.cpp @@ -19,8 +19,6 @@ #include "flang/Runtime/CUDA/common.h" #include "flang/Support/Fortran.h" -#include "cuda_runtime.h" - namespace Fortran::runtime::cuda { struct DeviceAllocation { @@ -133,6 +131,15 @@ void RTDEF(CUFRegisterAllocator)() { allocatorRegistry.Register( kUnifiedAllocatorPos, {&CUFAllocUnified, CUFFreeUnified}); } + +cudaStream_t RTDECL(CUFGetAssociatedStream)(void *p) { + int pos = findAllocation(p); + if (pos >= 0) { + cudaStream_t stream = deviceAllocations[pos].stream; + return stream; + } + return nullptr; +} } void *CUFAllocPinned( diff --git a/flang-rt/lib/runtime/extensions.cpp b/flang-rt/lib/runtime/extensions.cpp index 19e75143705ab..d3a618c1a39ec 100644 --- a/flang-rt/lib/runtime/extensions.cpp +++ b/flang-rt/lib/runtime/extensions.cpp @@ -163,6 +163,17 @@ void FORTRAN_PROCEDURE_NAME(flush)(const int &unit) { Cookie cookie{IONAME(BeginFlush)(unit, __FILE__, __LINE__)}; IONAME(EndIoStatement)(cookie); } + +void RTNAME(Flush)(int unit) { + // We set the `unit == -1` on the `flush()` case, so flush all units. 
+ if (unit < 0) { + Terminator terminator{__FILE__, __LINE__}; + IoErrorHandler handler{terminator}; + ExternalFileUnit::FlushAll(handler); + return; + } + FORTRAN_PROCEDURE_NAME(flush)(unit); +} } // namespace io // CALL FDATE(DATE) diff --git a/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp b/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp index 9935ae0eaac2f..f061c082cc614 100644 --- a/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp +++ b/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp @@ -121,3 +121,54 @@ TEST(AllocatableCUFTest, StreamDeviceAllocatable) { cudaDeviceSynchronize(); EXPECT_EQ(cudaSuccess, cudaGetLastError()); } + +TEST(AllocatableAsyncTest, StreamDeviceAllocatable) { + using Fortran::common::TypeCategory; + RTNAME(CUFRegisterAllocator)(); + // REAL(4), DEVICE, ALLOCATABLE :: a(:) + auto a{createAllocatable(TypeCategory::Real, 4)}; + a->SetAllocIdx(kDeviceAllocatorPos); + EXPECT_EQ((int)kDeviceAllocatorPos, a->GetAllocIdx()); + EXPECT_FALSE(a->HasAddendum()); + RTNAME(AllocatableSetBounds)(*a, 0, 1, 10); + + cudaStream_t stream; + cudaStreamCreate(&stream); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/(int64_t *)&stream, /*hasStat=*/false, + /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t s = RTDECL(CUFGetAssociatedStream)(a->raw().base_addr); + EXPECT_EQ(s, stream); + RTNAME(AllocatableDeallocate) + (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_FALSE(a->IsAllocated()); + cudaDeviceSynchronize(); + + cudaStream_t defaultStream = 0; + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/(int64_t *)&defaultStream, /*hasStat=*/false, + /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t d = RTDECL(CUFGetAssociatedStream)(a->raw().base_addr); + 
EXPECT_EQ(d, defaultStream); + RTNAME(AllocatableDeallocate) + (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_FALSE(a->IsAllocated()); + cudaDeviceSynchronize(); + + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/nullptr, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, + __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t empty = RTDECL(CUFGetAssociatedStream)(a->raw().base_addr); + EXPECT_EQ(empty, nullptr); +} diff --git a/flang/docs/Intrinsics.md b/flang/docs/Intrinsics.md index bfda5f3253a68..48e732d0b35a1 100644 --- a/flang/docs/Intrinsics.md +++ b/flang/docs/Intrinsics.md @@ -1288,6 +1288,40 @@ program chdir_func end program chdir_func ``` +### Non-Standard Intrinsics: FLUSH + +#### Description +`FLUSH(UNIT)` causes all pending I/O operations for the file connected to the +specified unit to be completed. If `UNIT` is omitted, all units are flushed. + +#### Arguments + +| | | +|------------|---------------------------------------------------------------------------------------------------| +| `UNIT` | (Optional) The unit number of an open file. If omitted, all open units are flushed. The type shall be `INTEGER`. | + +#### Usage and Info + +- **Standard:** GNU extension +- **Class:** Subroutine +- **Syntax:** `CALL FLUSH([UNIT])` + +#### Example +```Fortran +program demo_flush + integer :: unit + + ! Flush all units + call flush() + + ! 
Flush specific unit + open(unit=10, file='output.dat') + write(10, *) 'Data' + call flush(10) + close(10) +end program demo_flush +``` + ### Non-Standard Intrinsics: FSEEK and FTELL #### Description diff --git a/flang/docs/OpenMPSupport.md b/flang/docs/OpenMPSupport.md index 81f5f9f6dee5b..6ef0f2a581771 100644 --- a/flang/docs/OpenMPSupport.md +++ b/flang/docs/OpenMPSupport.md @@ -40,12 +40,12 @@ Note : No distinction is made between the support in Parser/Semantics, MLIR, Low | target data construct | P | device clause not supported | | target construct | P | device clause not supported | | target update construct | P | device clause not supported | -| declare target directive | P | | +| declare target directive | Y | | | teams construct | Y | | -| distribute construct | P | dist_schedule clause not supported | -| distribute simd construct | P | dist_schedule and linear clauses are not supported | -| distribute parallel loop construct | P | dist_schedule clause not supported | -| distribute parallel loop simd construct | P | dist_schedule and linear clauses are not supported | +| distribute construct | Y | | +| distribute simd construct | P | linear clauses are not supported | +| distribute parallel loop construct | Y | | +| distribute parallel loop simd construct | P | linear clauses are not supported | | depend clause | Y | | | declare reduction construct | N | | | atomic construct extensions | Y | | @@ -53,13 +53,13 @@ Note : No distinction is made between the support in Parser/Semantics, MLIR, Low | cancellation point construct | Y | | | parallel do simd construct | P | linear clause not supported | | target teams construct | P | device clause not supported | -| teams distribute construct | P | dist_schedule clause not supported | -| teams distribute simd construct | P | dist_schedule and linear clauses are not supported | -| target teams distribute construct | P | device and dist_schedule clauses are not supported | -| teams distribute parallel loop construct 
| P | dist_schedule clause not supported | -| target teams distribute parallel loop construct | P | device and dist_schedule clauses are not supported | -| teams distribute parallel loop simd construct | P | dist_schedule and linear clauses are not supported | -| target teams distribute parallel loop simd construct | P | device, dist_schedule and linear clauses are not supported | +| teams distribute construct | Y | | +| teams distribute simd construct | P | linear clause is not supported | +| target teams distribute construct | P | device clause is not supported | +| teams distribute parallel loop construct | Y | | +| target teams distribute parallel loop construct | P | device clause is not supported | +| teams distribute parallel loop simd construct | P | linear clause is not supported | +| target teams distribute parallel loop simd construct | P | device and linear clauses are not supported | ## Extensions ### ATOMIC construct diff --git a/flang/include/flang/Common/enum-set.h b/flang/include/flang/Common/enum-set.h index e048c66a393d0..ce1129474f8e7 100644 --- a/flang/include/flang/Common/enum-set.h +++ b/flang/include/flang/Common/enum-set.h @@ -217,6 +217,16 @@ template class EnumSet { private: bitsetType bitset_{}; }; + +namespace detail { +template struct IsEnumSetTest { + static constexpr bool value{false}; +}; +template struct IsEnumSetTest> { + static constexpr bool value{true}; +}; +} // namespace detail +template constexpr bool IsEnumSet{detail::IsEnumSetTest::value}; } // namespace Fortran::common template diff --git a/flang/include/flang/Optimizer/Builder/CUDAIntrinsicCall.h b/flang/include/flang/Optimizer/Builder/CUDAIntrinsicCall.h index 977bc0f4ee58c..e9b6e5cf23933 100644 --- a/flang/include/flang/Optimizer/Builder/CUDAIntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/CUDAIntrinsicCall.h @@ -29,6 +29,8 @@ struct CUDAIntrinsicLibrary : IntrinsicLibrary { template fir::ExtendedValue genAtomicAddVector(mlir::Type, llvm::ArrayRef); + 
fir::ExtendedValue genAtomicAddVector4x4(mlir::Type, + llvm::ArrayRef); mlir::Value genAtomicAnd(mlir::Type, llvm::ArrayRef); fir::ExtendedValue genAtomicCas(mlir::Type, llvm::ArrayRef); diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h index ce0b26c868701..005a9786e43b9 100644 --- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h @@ -254,6 +254,7 @@ struct IntrinsicLibrary { template mlir::Value genExtremum(mlir::Type, llvm::ArrayRef); mlir::Value genFloor(mlir::Type, llvm::ArrayRef); + void genFlush(llvm::ArrayRef); mlir::Value genFraction(mlir::Type resultType, mlir::ArrayRef args); void genFree(mlir::ArrayRef args); diff --git a/flang/include/flang/Optimizer/Builder/Runtime/Intrinsics.h b/flang/include/flang/Optimizer/Builder/Runtime/Intrinsics.h index 7a97172cfbb9a..5121ccce921c6 100644 --- a/flang/include/flang/Optimizer/Builder/Runtime/Intrinsics.h +++ b/flang/include/flang/Optimizer/Builder/Runtime/Intrinsics.h @@ -51,6 +51,8 @@ mlir::Value genDsecnds(fir::FirOpBuilder &builder, mlir::Location loc, void genEtime(fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value values, mlir::Value time); +void genFlush(fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value unit); + void genFree(fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value ptr); mlir::Value genFseek(fir::FirOpBuilder &builder, mlir::Location loc, diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td index d416d6c61f178..5d16b9816e318 100644 --- a/flang/include/flang/Optimizer/Dialect/FIROps.td +++ b/flang/include/flang/Optimizer/Dialect/FIROps.td @@ -3753,7 +3753,7 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", [IsolatedFromAbove, duplication at the moment. TODO Combine both ops into one. 
See: https://discourse.llvm.org/t/dialect-for-data-locality-sharing-specifiers-clauses-in-openmp-openacc-and-do-concurrent/86108. - Declares a `do concurrent` reduction. This requires two mandatory and three + Declares a `do concurrent` reduction. This requires two mandatory and four optional regions. 1. The optional alloc region specifies how to allocate the thread-local @@ -3782,6 +3782,9 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", [IsolatedFromAbove, allocated by the initializer region. The region has an argument that contains the value of the thread-local reduction accumulator. This will be executed after the reduction has completed. + 6. The DataPtrPtr region specifies how to access the base address of a + boxed-value. This is used, in particular, for GPU reductions in order + know where partial reduction results are stored in remote lanes. Note that the MLIR type system does not allow for type-polymorphic reductions. Separate reduction declarations should be created for different @@ -3789,23 +3792,30 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", [IsolatedFromAbove, For initializer and reduction regions, the operand to `fir.yield` must match the parent operation's results. + + * `$byref_element_type`: For by-ref reductions, we want to keep track of the + boxed/allocated type. For example, for a `real, allocatable` variable, + `real` should be stored in this attribute. }]; let arguments = (ins SymbolNameAttr:$sym_name, - TypeAttr:$type); + TypeAttr:$type, + OptionalAttr:$byref_element_type); let regions = (region MaxSizedRegion<1>:$allocRegion, AnyRegion:$initializerRegion, AnyRegion:$reductionRegion, AnyRegion:$atomicReductionRegion, - AnyRegion:$cleanupRegion); + AnyRegion:$cleanupRegion, + AnyRegion:$dataPtrPtrRegion); let assemblyFormat = "$sym_name `:` $type attr-dict-with-keyword " "( `alloc` $allocRegion^ )? " "`init` $initializerRegion " "`combiner` $reductionRegion " "( `atomic` $atomicReductionRegion^ )? 
" - "( `cleanup` $cleanupRegion^ )? "; + "( `cleanup` $cleanupRegion^ )? " + "( `data_ptr_ptr` $dataPtrPtrRegion^ )? "; let extraClassDeclaration = [{ mlir::BlockArgument getAllocMoldArg() { diff --git a/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.h b/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.h index 0020e1ab21a56..d7f8f87ccb8bf 100644 --- a/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.h +++ b/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.h @@ -65,6 +65,7 @@ struct GlobalVariableModel : public mlir::acc::GlobalVariableOpInterface::ExternalModel< GlobalVariableModel, fir::GlobalOp> { bool isConstant(mlir::Operation *op) const; + mlir::Region *getInitRegion(mlir::Operation *op) const; }; template diff --git a/flang/include/flang/Optimizer/Transforms/Passes.h b/flang/include/flang/Optimizer/Transforms/Passes.h index f83a1559fa016..4dcdddaac8ee5 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.h +++ b/flang/include/flang/Optimizer/Transforms/Passes.h @@ -40,7 +40,6 @@ std::unique_ptr createArrayValueCopyPass(fir::ArrayValueCopyOptions options = {}); std::unique_ptr createMemDataFlowOptPass(); std::unique_ptr createPromoteToAffinePass(); -std::unique_ptr createFIRToSCFPass(); std::unique_ptr createAddDebugInfoPass(fir::AddDebugInfoOptions options = {}); diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td index 0f613584c6e17..f5403ab6ff503 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.td +++ b/flang/include/flang/Optimizer/Transforms/Passes.td @@ -81,7 +81,6 @@ def FIRToSCFPass : Pass<"fir-to-scf"> { let description = [{ Convert FIR structured control flow ops to SCF dialect. 
}]; - let constructor = "::fir::createFIRToSCFPass()"; let dependentDialects = [ "fir::FIROpsDialect", "mlir::scf::SCFDialect" ]; diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h index 32fcd4182bed7..f460e61fbb915 100644 --- a/flang/include/flang/Parser/dump-parse-tree.h +++ b/flang/include/flang/Parser/dump-parse-tree.h @@ -14,9 +14,11 @@ #include "parse-tree.h" #include "tools.h" #include "unparse.h" +#include "flang/Common/enum-set.h" #include "flang/Common/idioms.h" #include "flang/Common/indirection.h" #include "flang/Support/Fortran.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Frontend/OpenMP/OMP.h" #include "llvm/Support/raw_ostream.h" #include @@ -35,6 +37,19 @@ class ParseTreeDumper { : out_(out), asFortran_{asFortran} {} static constexpr const char *GetNodeName(const char *) { return "char *"; } + + template + static std::string GetMemberNames(const common::EnumSet &x) { + llvm::ListSeparator sep; + std::string s; + llvm::raw_string_ostream stream(s); + x.IterateOverMembers([&](E e) { stream << sep << T::EnumToString(e); }); + return stream.str(); + } +#define NODE_ENUMSET(T, S) \ + static std::string GetNodeName(const T::S &x) { \ + return #S " = {"s + GetMemberNames(x) + "}"s; \ + } #define NODE_NAME(T, N) \ static constexpr const char *GetNodeName(const T &) { return N; } #define NODE_ENUM(T, E) \ @@ -572,7 +587,8 @@ class ParseTreeDumper { NODE_ENUM(OmpDeviceTypeClause, DeviceTypeDescription) NODE(parser, OmpDirectiveName) NODE(parser, OmpDirectiveSpecification) - NODE_ENUM(OmpDirectiveSpecification, Flags) + NODE_ENUM(OmpDirectiveSpecification, Flag) + NODE_ENUMSET(OmpDirectiveSpecification, Flags) NODE(parser, OmpDoacross) NODE(OmpDoacross, Sink) NODE(OmpDoacross, Source) diff --git a/flang/include/flang/Parser/parse-tree-visitor.h b/flang/include/flang/Parser/parse-tree-visitor.h index af1d34ae804f3..7ebce671c5fd1 100644 --- a/flang/include/flang/Parser/parse-tree-visitor.h +++ 
b/flang/include/flang/Parser/parse-tree-visitor.h @@ -10,6 +10,7 @@ #define FORTRAN_PARSER_PARSE_TREE_VISITOR_H_ #include "parse-tree.h" +#include "flang/Common/enum-set.h" #include "flang/Common/visit.h" #include #include @@ -41,7 +42,7 @@ struct ParseTreeVisitorLookupScope { // Default case for visitation of non-class data members, strings, and // any other non-decomposable values. template - static std::enable_if_t || + static std::enable_if_t || common::IsEnumSet || std::is_same_v || std::is_same_v> Walk(const A &x, V &visitor) { if (visitor.Pre(x)) { diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h index 003d11721908e..dd928e1244a2f 100644 --- a/flang/include/flang/Parser/parse-tree.h +++ b/flang/include/flang/Parser/parse-tree.h @@ -22,6 +22,7 @@ #include "format-specification.h" #include "message.h" #include "provenance.h" +#include "flang/Common/enum-set.h" #include "flang/Common/idioms.h" #include "flang/Common/indirection.h" #include "flang/Common/reference.h" @@ -4975,7 +4976,9 @@ struct OmpClauseList { // --- Directives and constructs struct OmpDirectiveSpecification { - ENUM_CLASS(Flags, None, DeprecatedSyntax); + ENUM_CLASS(Flag, DeprecatedSyntax) + using Flags = common::EnumSet; + TUPLE_CLASS_BOILERPLATE(OmpDirectiveSpecification); const OmpDirectiveName &DirName() const { return std::get(t); diff --git a/flang/include/flang/Runtime/CUDA/allocator.h b/flang/include/flang/Runtime/CUDA/allocator.h index 59fdb22b6e663..56176360296a9 100644 --- a/flang/include/flang/Runtime/CUDA/allocator.h +++ b/flang/include/flang/Runtime/CUDA/allocator.h @@ -13,11 +13,14 @@ #include "flang/Runtime/descriptor-consts.h" #include "flang/Runtime/entry-names.h" +#include "cuda_runtime.h" + namespace Fortran::runtime::cuda { extern "C" { void RTDECL(CUFRegisterAllocator)(); +cudaStream_t RTDECL(CUFGetAssociatedStream)(void *); } void *CUFAllocPinned(std::size_t, std::int64_t *); diff --git 
a/flang/include/flang/Runtime/extensions.h b/flang/include/flang/Runtime/extensions.h index 9fd3e118a0f22..8db68eb9c245c 100644 --- a/flang/include/flang/Runtime/extensions.h +++ b/flang/include/flang/Runtime/extensions.h @@ -34,6 +34,7 @@ double RTNAME(Dsecnds)(double *refTime, const char *sourceFile, int line); // CALL FLUSH(n) antedates the Fortran 2003 FLUSH statement. void FORTRAN_PROCEDURE_NAME(flush)(const int &unit); +void RTNAME(Flush)(int unit); // GNU extension subroutine FDATE void FORTRAN_PROCEDURE_NAME(fdate)(char *string, std::int64_t length); diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp index 8f4204b1f9afe..2ba28a7ea752e 100644 --- a/flang/lib/Evaluate/intrinsics.cpp +++ b/flang/lib/Evaluate/intrinsics.cpp @@ -1597,6 +1597,10 @@ static const IntrinsicInterface intrinsicSubroutine[]{ {"exit", {{"status", DefaultInt, Rank::scalar, Optionality::optional}}, {}, Rank::elemental, IntrinsicClass::impureSubroutine}, {"free", {{"ptr", Addressable}}, {}}, + {"flush", + {{"unit", AnyInt, Rank::scalar, Optionality::optional, + common::Intent::In}}, + {}, Rank::elemental, IntrinsicClass::impureSubroutine}, {"fseek", {{"unit", AnyInt, Rank::scalar}, {"offset", AnyInt, Rank::scalar}, {"whence", AnyInt, Rank::scalar}, diff --git a/flang/lib/Frontend/CMakeLists.txt b/flang/lib/Frontend/CMakeLists.txt index bb0b4a39cec9b..fb74b3dcb280e 100644 --- a/flang/lib/Frontend/CMakeLists.txt +++ b/flang/lib/Frontend/CMakeLists.txt @@ -75,7 +75,6 @@ add_flang_library(flangFrontend CLANG_LIBS clangBasic - clangDriver clangOptions ) diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp index 0c32f3914e04b..b6c4e6303cdac 100644 --- a/flang/lib/Frontend/CompilerInvocation.cpp +++ b/flang/lib/Frontend/CompilerInvocation.cpp @@ -325,10 +325,9 @@ static void parseCodeGenArgs(Fortran::frontend::CodeGenOptions &opts, for (auto *a : args.filtered(clang::options::OPT_fpass_plugin_EQ)) 
opts.LLVMPassPlugins.push_back(a->getValue()); - opts.Reciprocals = clang::driver::tools::parseMRecipOption(diags, args); + opts.Reciprocals = clang::parseMRecipOption(diags, args); - opts.PreferVectorWidth = - clang::driver::tools::parseMPreferVectorWidthOption(diags, args); + opts.PreferVectorWidth = clang::parseMPreferVectorWidthOption(diags, args); // -fembed-offload-object option for (auto *a : args.filtered(clang::options::OPT_fembed_offload_object_EQ)) diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h index 529b871330052..54ec9c5f0d752 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.h +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h @@ -20,7 +20,6 @@ #include "flang/Lower/OpenMP/Clauses.h" #include "flang/Lower/Support/ReductionProcessor.h" #include "flang/Optimizer/Builder/Todo.h" -#include "flang/Parser/dump-parse-tree.h" #include "flang/Parser/parse-tree.h" #include "mlir/Dialect/OpenMP/OpenMPDialect.h" diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 6ca8636bb6459..0a200388a36e5 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -20,11 +20,13 @@ #include "flang/Common/idioms.h" #include "flang/Evaluate/type.h" #include "flang/Lower/Bridge.h" +#include "flang/Lower/ConvertCall.h" #include "flang/Lower/ConvertExpr.h" #include "flang/Lower/ConvertExprToHLFIR.h" #include "flang/Lower/ConvertVariable.h" #include "flang/Lower/DirectivesCommon.h" #include "flang/Lower/OpenMP/Clauses.h" +#include "flang/Lower/PFTBuilder.h" #include "flang/Lower/StatementContext.h" #include "flang/Lower/Support/ReductionProcessor.h" #include "flang/Lower/SymbolMap.h" @@ -568,14 +570,9 @@ getCollapsedLoopEval(lower::pft::Evaluation &eval, int collapseValue) { if (collapseValue == 0) return &eval; - lower::pft::Evaluation *curEval = &eval.getFirstNestedEvaluation(); - for (int i = 1; i < collapseValue; i++) { - // The nested evaluations should be 
DoConstructs (i.e. they should form - // a loop nest). Each DoConstruct is a tuple . - assert(curEval->isA()); - curEval = &*std::next(curEval->getNestedEvaluations().begin()); - } + lower::pft::Evaluation *curEval = &eval; + for (int i = 0; i < collapseValue; i++) + curEval = getNestedDoConstruct(*curEval); return curEval; } @@ -3586,19 +3583,32 @@ processReductionCombiner(lower::AbstractConverter &converter, const parser::OmpStylizedInstance::Instance &instance = std::get(combinerInstance.t); - const auto *as = std::get_if(&instance.u); - if (!as) { - TODO(converter.getCurrentLocation(), - "A combiner that is a subroutine call is not yet supported"); + std::optional evalExprOpt; + if (const auto *as = std::get_if(&instance.u)) { + auto &expr = std::get(as->t); + evalExprOpt = makeExpr(expr, semaCtx); + } else if (const auto *call = std::get_if(&instance.u)) { + if (call->typedCall) { + const auto &procRef = *call->typedCall; + evalExprOpt = semantics::SomeExpr{procRef}; + } else { + TODO(converter.getCurrentLocation(), + "CallStmt without typedCall is not yet supported"); + } + } else { + TODO(converter.getCurrentLocation(), "Unsupported combiner instance type"); } - auto &expr = std::get(as->t); - genCombinerCB = [&](fir::FirOpBuilder &builder, mlir::Location loc, - mlir::Type type, mlir::Value lhs, mlir::Value rhs, - bool isByRef) { - const auto &evalExpr = makeExpr(expr, semaCtx); + + assert(evalExprOpt.has_value() && "evalExpr must be initialized"); + semantics::SomeExpr evalExpr = *evalExprOpt; + + genCombinerCB = [&, evalExpr](fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Type type, mlir::Value lhs, + mlir::Value rhs, bool isByRef) { lower::SymMapScope scope(symTable); const std::list &declList = std::get>(combinerInstance.t); + mlir::Value ompOutVar; for (const parser::OmpStylizedDeclaration &decl : declList) { auto &name = std::get(decl.var.t); mlir::Value addr = lhs; @@ -3621,15 +3631,32 @@ processReductionCombiner(lower::AbstractConverter 
&converter, auto declareOp = hlfir::DeclareOp::create(builder, loc, addr, name.ToString(), nullptr, {}, nullptr, nullptr, 0, attributes); + if (name.ToString() == "omp_out") + ompOutVar = declareOp.getResult(0); symTable.addVariableDefinition(*name.symbol, declareOp); } lower::StatementContext stmtCtx; - mlir::Value result = fir::getBase( - convertExprToValue(loc, converter, evalExpr, symTable, stmtCtx)); - if (auto refType = llvm::dyn_cast(result.getType())) - if (lhs.getType() == refType.getElementType()) - result = fir::LoadOp::create(builder, loc, result); + mlir::Value result = common::visit( + common::visitors{ + [&](const evaluate::ProcedureRef &procRef) -> mlir::Value { + convertCallToHLFIR(loc, converter, procRef, std::nullopt, + symTable, stmtCtx); + auto outVal = fir::LoadOp::create(builder, loc, ompOutVar); + return outVal; + }, + [&](const auto &expr) -> mlir::Value { + mlir::Value exprResult = fir::getBase(convertExprToValue( + loc, converter, evalExpr, symTable, stmtCtx)); + // Optional load may be generated if we get a reference to the + // reduction type. 
+ if (auto refType = + llvm::dyn_cast(exprResult.getType())) + if (lhs.getType() == refType.getElementType()) + exprResult = fir::LoadOp::create(builder, loc, exprResult); + return exprResult; + }}, + evalExpr.u); stmtCtx.finalizeAndPop(); if (isByRef) { fir::StoreOp::create(builder, loc, result, lhs); diff --git a/flang/lib/Lower/OpenMP/Utils.cpp b/flang/lib/Lower/OpenMP/Utils.cpp index fed84eb4df071..ccac64335c29a 100644 --- a/flang/lib/Lower/OpenMP/Utils.cpp +++ b/flang/lib/Lower/OpenMP/Utils.cpp @@ -796,7 +796,7 @@ static void processTileSizesFromOpenMPConstruct( } } -static pft::Evaluation *getNestedDoConstruct(pft::Evaluation &eval) { +pft::Evaluation *getNestedDoConstruct(pft::Evaluation &eval) { for (pft::Evaluation &nested : eval.getNestedEvaluations()) { // In an OpenMPConstruct there can be compiler directives: // 1 <> diff --git a/flang/lib/Lower/OpenMP/Utils.h b/flang/lib/Lower/OpenMP/Utils.h index 2960b663b08b2..8a68ff8bd3bdc 100644 --- a/flang/lib/Lower/OpenMP/Utils.h +++ b/flang/lib/Lower/OpenMP/Utils.h @@ -167,6 +167,8 @@ void genObjectList(const ObjectList &objects, void lastprivateModifierNotSupported(const omp::clause::Lastprivate &lastp, mlir::Location loc); +pft::Evaluation *getNestedDoConstruct(pft::Evaluation &eval); + int64_t collectLoopRelatedInfo( lower::AbstractConverter &converter, mlir::Location currentLocation, lower::pft::Evaluation &eval, const omp::List &clauses, diff --git a/flang/lib/Lower/Support/ReductionProcessor.cpp b/flang/lib/Lower/Support/ReductionProcessor.cpp index 721cb45cd7d24..db8ad909b1d2f 100644 --- a/flang/lib/Lower/Support/ReductionProcessor.cpp +++ b/flang/lib/Lower/Support/ReductionProcessor.cpp @@ -572,10 +572,21 @@ DeclareRedType ReductionProcessor::createDeclareReductionHelper( mlir::OpBuilder modBuilder(module.getBodyRegion()); mlir::Type valTy = fir::unwrapRefType(type); - if (!isByRef) + + // For by-ref reductions, we want to keep track of the + // boxed/referenced/allocated type. 
For example, for a `real, allocatable` + // variable, `real` should be stored. + mlir::TypeAttr boxedTyAttr{}; + mlir::Type boxedTy; + + if (isByRef) { + boxedTy = fir::unwrapPassByRefType(valTy); + boxedTyAttr = mlir::TypeAttr::get(boxedTy); + } else type = valTy; - decl = DeclareRedType::create(modBuilder, loc, reductionOpName, type); + decl = DeclareRedType::create(modBuilder, loc, reductionOpName, type, + boxedTyAttr); createReductionAllocAndInitRegions(converter, loc, decl, genInitValueCB, type, isByRef); builder.createBlock(&decl.getReductionRegion(), @@ -585,6 +596,38 @@ DeclareRedType ReductionProcessor::createDeclareReductionHelper( mlir::Value op1 = decl.getReductionRegion().front().getArgument(0); mlir::Value op2 = decl.getReductionRegion().front().getArgument(1); genCombinerCB(builder, loc, type, op1, op2, isByRef); + + if (isByRef && fir::isa_box_type(valTy)) { + bool isBoxReductionSupported = [&]() { + auto offloadMod = llvm::dyn_cast( + *builder.getModule()); + + // This check tests the implementation status on the GPU. Box reductions + // are fully supported on the CPU. + if (!offloadMod.getIsGPU()) + return true; + + auto seqTy = mlir::dyn_cast(boxedTy); + + // Dynamically-shaped arrays are not supported yet on the GPU. 
+ return !seqTy || !fir::sequenceWithNonConstantShape(seqTy); + }(); + + if (!isBoxReductionSupported) { + TODO(loc, "Reduction of dynamically-shaped arrays are not supported yet " + "on the GPU."); + } + + mlir::Region &dataPtrPtrRegion = decl.getDataPtrPtrRegion(); + mlir::Block &dataAddrBlock = *builder.createBlock( + &dataPtrPtrRegion, dataPtrPtrRegion.end(), {type}, {loc}); + builder.setInsertionPointToEnd(&dataAddrBlock); + mlir::Value boxRefOperand = dataAddrBlock.getArgument(0); + mlir::Value baseAddrOffset = fir::BoxOffsetOp::create( + builder, loc, boxRefOperand, fir::BoxFieldAttr::base_addr); + genYield(builder, loc, baseAddrOffset); + } + return decl; } diff --git a/flang/lib/Optimizer/Builder/CUDAIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/CUDAIntrinsicCall.cpp index f8c953b38c857..270037f5fcb00 100644 --- a/flang/lib/Optimizer/Builder/CUDAIntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/CUDAIntrinsicCall.cpp @@ -195,7 +195,7 @@ static constexpr IntrinsicHandler cudaHandlers[]{ false}, {"atomicadd_r4x4", static_cast( - &CI::genAtomicAddVector<4>), + &CI::genAtomicAddVector4x4), {{{"a", asAddr}, {"v", asAddr}}}, false}, {"atomicaddd", @@ -758,6 +758,56 @@ fir::ExtendedValue CUDAIntrinsicLibrary::genAtomicAddVector( return fir::ArrayBoxValue(res, {ext}); } +// ATOMICADDVECTOR4x4 +fir::ExtendedValue CUDAIntrinsicLibrary::genAtomicAddVector4x4( + mlir::Type resultType, llvm::ArrayRef args) { + assert(args.size() == 2); + mlir::Value a = fir::getBase(args[0]); + if (mlir::isa(a.getType())) + a = fir::BoxAddrOp::create(builder, loc, a); + + const unsigned extent = 4; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + mlir::Value ptr = builder.createConvert(loc, llvmPtrTy, a); + mlir::Type f32Ty = builder.getF32Type(); + mlir::Type idxTy = builder.getIndexType(); + mlir::Type refTy = fir::ReferenceType::get(f32Ty); + llvm::SmallVector values; + for (unsigned i = 0; i < extent; ++i) { + mlir::Value pos = 
builder.createIntegerConstant(loc, idxTy, i); + mlir::Value coord = fir::CoordinateOp::create(builder, loc, refTy, + fir::getBase(args[1]), pos); + mlir::Value value = fir::LoadOp::create(builder, loc, coord); + values.push_back(value); + } + + auto inlinePtx = mlir::NVVM::InlinePtxOp::create( + builder, loc, {f32Ty, f32Ty, f32Ty, f32Ty}, + {ptr, values[0], values[1], values[2], values[3]}, {}, + "atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};", {}); + + llvm::SmallVector results; + results.push_back(inlinePtx.getResult(0)); + results.push_back(inlinePtx.getResult(1)); + results.push_back(inlinePtx.getResult(2)); + results.push_back(inlinePtx.getResult(3)); + + mlir::Type vecF32Ty = mlir::VectorType::get({extent}, f32Ty); + mlir::Value undef = mlir::LLVM::UndefOp::create(builder, loc, vecF32Ty); + mlir::Type i32Ty = builder.getI32Type(); + for (unsigned i = 0; i < extent; ++i) + undef = mlir::LLVM::InsertElementOp::create( + builder, loc, undef, results[i], + builder.createIntegerConstant(loc, i32Ty, i)); + + auto i128Ty = builder.getIntegerType(128); + auto i128VecTy = mlir::VectorType::get({1}, i128Ty); + mlir::Value vec128 = + mlir::vector::BitCastOp::create(builder, loc, i128VecTy, undef); + return mlir::vector::ExtractOp::create(builder, loc, vec128, + mlir::ArrayRef{0}); +} + mlir::Value CUDAIntrinsicLibrary::genAtomicAnd(mlir::Type resultType, llvm::ArrayRef args) { diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index 1714d48980a85..f78afd9a21a4d 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -91,6 +91,11 @@ static bool isStaticallyAbsent(llvm::ArrayRef args, size_t argIndex) { return args.size() <= argIndex || !args[argIndex]; } +static bool isOptional(mlir::Value value) { + auto varIface = mlir::dyn_cast_or_null( + value.getDefiningOp()); + return varIface && varIface.isOptional(); +} /// Test if an ExtendedValue is 
present. This is used to test if an intrinsic /// argument is present at compile time. This does not imply that the related @@ -303,6 +308,10 @@ static constexpr IntrinsicHandler handlers[]{ {"back", asValue, handleDynamicOptional}}}, /*isElemental=*/false}, {"floor", &I::genFloor}, + {"flush", + &I::genFlush, + {{{"unit", asAddr}}}, + /*isElemental=*/false}, {"fraction", &I::genFraction}, {"free", &I::genFree}, {"fseek", @@ -3942,6 +3951,40 @@ mlir::Value IntrinsicLibrary::genFloor(mlir::Type resultType, return builder.createConvert(loc, resultType, floor); } +// FLUSH +void IntrinsicLibrary::genFlush(llvm::ArrayRef args) { + assert(args.size() == 1); + + mlir::Value unit; + if (isStaticallyAbsent(args[0])) + // Give a sentinal value of `-1` on the `()` case. + unit = builder.createIntegerConstant(loc, builder.getI32Type(), -1); + else { + unit = fir::getBase(args[0]); + if (isOptional(unit)) { + mlir::Value isPresent = + fir::IsPresentOp::create(builder, loc, builder.getI1Type(), unit); + unit = builder + .genIfOp(loc, builder.getI32Type(), isPresent, + /*withElseRegion=*/true) + .genThen([&]() { + mlir::Value loaded = fir::LoadOp::create(builder, loc, unit); + fir::ResultOp::create(builder, loc, loaded); + }) + .genElse([&]() { + mlir::Value negOne = builder.createIntegerConstant( + loc, builder.getI32Type(), -1); + fir::ResultOp::create(builder, loc, negOne); + }) + .getResults()[0]; + } else { + unit = fir::LoadOp::create(builder, loc, unit); + } + } + + fir::runtime::genFlush(builder, loc, unit); +} + // FRACTION mlir::Value IntrinsicLibrary::genFraction(mlir::Type resultType, llvm::ArrayRef args) { @@ -6298,12 +6341,6 @@ IntrinsicLibrary::genCharacterCompare(mlir::Type resultType, fir::getBase(args[1]), fir::getLen(args[1])); } -static bool isOptional(mlir::Value value) { - auto varIface = mlir::dyn_cast_or_null( - value.getDefiningOp()); - return varIface && varIface.isOptional(); -} - // LOC fir::ExtendedValue IntrinsicLibrary::genLoc(mlir::Type 
resultType, diff --git a/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp b/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp index 110b1b20898c7..9fa3b18a255bd 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp @@ -137,6 +137,15 @@ void fir::runtime::genEtime(fir::FirOpBuilder &builder, mlir::Location loc, fir::CallOp::create(builder, loc, runtimeFunc, args); } +void fir::runtime::genFlush(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value unit) { + auto runtimeFunc = fir::runtime::getRuntimeFunc(loc, builder); + llvm::SmallVector args = fir::runtime::createArguments( + builder, loc, runtimeFunc.getFunctionType(), unit); + + fir::CallOp::create(builder, loc, runtimeFunc, args); +} + void fir::runtime::genFree(fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value ptr) { auto runtimeFunc = fir::runtime::getRuntimeFunc(loc, builder); diff --git a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.cpp b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.cpp index 902a2ecdec35f..e4d02e93b041f 100644 --- a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.cpp +++ b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCOpsInterfaces.cpp @@ -71,6 +71,11 @@ bool GlobalVariableModel::isConstant(mlir::Operation *op) const { return globalOp.getConstant().has_value(); } +mlir::Region *GlobalVariableModel::getInitRegion(mlir::Operation *op) const { + auto globalOp = mlir::cast(op); + return globalOp.hasInitializationBody() ? &globalOp.getRegion() : nullptr; +} + // Helper to recursively process address-of operations in derived type // descriptors and collect all needed fir.globals. 
static void processAddrOfOpInDerivedTypeDescriptor( diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp index 9aad8cddc60a1..1012a9608aa27 100644 --- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp +++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp @@ -848,7 +848,8 @@ class DoConcurrentConversion if (!ompReducer) { ompReducer = mlir::omp::DeclareReductionOp::create( rewriter, firReducer.getLoc(), ompReducerName, - firReducer.getTypeAttr().getValue()); + firReducer.getTypeAttr().getValue(), + firReducer.getByrefElementTypeAttr()); cloneFIRRegionToOMP(rewriter, firReducer.getAllocRegion(), ompReducer.getAllocRegion()); diff --git a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp index e72ee333101f5..187caa6043ac8 100644 --- a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp +++ b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp @@ -18,6 +18,8 @@ namespace fir { namespace { class FIRToSCFPass : public fir::impl::FIRToSCFPassBase { + using FIRToSCFPassBase::FIRToSCFPassBase; + public: void runOnOperation() override; }; @@ -230,7 +232,3 @@ void FIRToSCFPass::runOnOperation() { fir::populateFIRToSCFRewrites(patterns, parallelUnordered); walkAndApplyPatterns(getOperation(), std::move(patterns)); } - -std::unique_ptr fir::createFIRToSCFPass() { - return std::make_unique(); -} diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp index b033206d90c41..bd259a9c6e01d 100644 --- a/flang/lib/Parser/openmp-parsers.cpp +++ b/flang/lib/Parser/openmp-parsers.cpp @@ -1633,7 +1633,8 @@ TYPE_PARSER( maybe(Parser{}), maybe(parenthesized( OmpArgumentListParser{})), - pure(OmpDirectiveSpecification::Flags::DeprecatedSyntax)))) || + pure(OmpDirectiveSpecification::Flags( + {OmpDirectiveSpecification::Flag::DeprecatedSyntax}))))) || // Parse DECLARE_VARIANT individually, because the "[base:]variant" // argument will conflict with 
DECLARE_REDUCTION's "ident:types...". predicated(Parser{}, @@ -1643,13 +1644,13 @@ TYPE_PARSER( maybe(parenthesized(OmpArgumentListParser< llvm::omp::Directive::OMPD_declare_variant>{})), maybe(Parser{}), - pure(OmpDirectiveSpecification::Flags::None))) || + pure(OmpDirectiveSpecification::Flags()))) || // Parse the standard syntax: directive [(arguments)] [clauses] sourced(construct( // sourced(OmpDirectiveNameParser{}), maybe(parenthesized(OmpArgumentListParser<>{})), maybe(Parser{}), - pure(OmpDirectiveSpecification::Flags::None)))) + pure(OmpDirectiveSpecification::Flags())))) static bool IsStandaloneOrdered(const OmpDirectiveSpecification &dirSpec) { // An ORDERED construct is standalone if it has DOACROSS or DEPEND clause. diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp index 3854d33d46d48..8e9c7d04bc522 100644 --- a/flang/lib/Parser/unparse.cpp +++ b/flang/lib/Parser/unparse.cpp @@ -2142,7 +2142,7 @@ class UnparseVisitor { Walk(std::get(x.t)); auto flags{std::get(x.t)}; - if (flags == OmpDirectiveSpecification::Flags::DeprecatedSyntax) { + if (flags.test(OmpDirectiveSpecification::Flag::DeprecatedSyntax)) { if (x.DirId() == llvm::omp::Directive::OMPD_flush) { // FLUSH clause arglist unparseClauses(); @@ -2539,8 +2539,8 @@ class UnparseVisitor { void Unparse(const OpenMPInteropConstruct &x) { BeginOpenMP(); Word("!$OMP INTEROP"); - using Flags = OmpDirectiveSpecification::Flags; - if (std::get(x.v.t) == Flags::DeprecatedSyntax) { + auto flags{std::get(x.v.t)}; + if (flags.test(OmpDirectiveSpecification::Flag::DeprecatedSyntax)) { Walk("(", std::get>(x.v.t), ")"); Walk(" ", std::get>(x.v.t)); } else { @@ -2679,8 +2679,8 @@ class UnparseVisitor { void Unparse(const OpenMPFlushConstruct &x) { BeginOpenMP(); Word("!$OMP FLUSH"); - using Flags = OmpDirectiveSpecification::Flags; - if (std::get(x.v.t) == Flags::DeprecatedSyntax) { + auto flags{std::get(x.v.t)}; + if (flags.test(OmpDirectiveSpecification::Flag::DeprecatedSyntax)) { Walk("(", 
std::get>(x.v.t), ")"); Walk(" ", std::get>(x.v.t)); } else { diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp index f597eaa4711dc..f7778472f71f1 100644 --- a/flang/lib/Semantics/check-omp-structure.cpp +++ b/flang/lib/Semantics/check-omp-structure.cpp @@ -2748,8 +2748,8 @@ void OmpStructureChecker::Leave(const parser::OpenMPFlushConstruct &x) { unsigned version{context_.langOptions().OpenMPVersion}; if (version >= 52) { - using Flags = parser::OmpDirectiveSpecification::Flags; - if (std::get(x.v.t) == Flags::DeprecatedSyntax) { + auto &flags{std::get(x.v.t)}; + if (flags.test(parser::OmpDirectiveSpecification::Flag::DeprecatedSyntax)) { context_.Say(x.source, "The syntax \"FLUSH clause (object, ...)\" has been deprecated, use \"FLUSH(object, ...) clause\" instead"_warn_en_US); } diff --git a/flang/test/Lower/CUDA/cuda-atomicadd.cuf b/flang/test/Lower/CUDA/cuda-atomicadd.cuf index 6669b4afa291d..573e01242c78f 100644 --- a/flang/test/Lower/CUDA/cuda-atomicadd.cuf +++ b/flang/test/Lower/CUDA/cuda-atomicadd.cuf @@ -32,4 +32,4 @@ attributes(global) subroutine test_atomicadd_r4x4() end subroutine ! CHECK-LABEL: func.func @_QPtest_atomicadd_r4x4() attributes {cuf.proc_attr = #cuf.cuda_proc} -! CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, vector<4xf32> +! CHECK: atom.add.v4.f32 diff --git a/flang/test/Lower/Intrinsics/flush.f90 b/flang/test/Lower/Intrinsics/flush.f90 new file mode 100644 index 0000000000000..2b02179d84c79 --- /dev/null +++ b/flang/test/Lower/Intrinsics/flush.f90 @@ -0,0 +1,41 @@ +! RUN: bbc -emit-hlfir %s -o - | FileCheck %s +! RUN: %flang_fc1 -emit-hlfir %s -o - | FileCheck %s +! +! Test lowering of intrinsic subroutine FLUSH with and without optional UNIT argument. +! +! CHECK-LABEL: func.func @_QPflush_all() +! CHECK: %[[UNIT:.*]] = arith.constant -1 : i32 +! CHECK: fir.call @_FortranAFlush(%[[UNIT]]) fastmath : (i32) -> () +! 
CHECK: return +subroutine flush_all() + call flush() ! flush all units +end subroutine + +! CHECK-LABEL: func.func @_QPflush_unit() +! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 +! CHECK: %[[UNITC:.*]] = arith.constant 10 : i32 +! CHECK: fir.store %[[UNITC]] to %[[ALLOCA]] : !fir.ref +! CHECK: %[[LOADED:.*]] = fir.load %[[ALLOCA]] : !fir.ref +! CHECK: fir.call @_FortranAFlush(%[[LOADED]]) fastmath : (i32) -> () +! CHECK: return +subroutine flush_unit() + call flush(10) ! flush specific unit +end subroutine + +! CHECK-LABEL: func.func @_QPflush_optional( +! CHECK-SAME: %[[ARG0:.*]]: !fir.ref {fir.bindc_name = "unit", fir.optional}) { +! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFflush_optionalEunit"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) +! CHECK: %[[IS_PRESENT:.*]] = fir.is_present %[[DECL]]#0 : (!fir.ref) -> i1 +! CHECK: %[[UNIT:.*]] = fir.if %[[IS_PRESENT]] -> (i32) { +! CHECK: %[[LOADED:.*]] = fir.load %[[DECL]]#0 : !fir.ref +! CHECK: fir.result %[[LOADED]] : i32 +! CHECK: } else { +! CHECK: %[[DEFAULT:.*]] = arith.constant -1 : i32 +! CHECK: fir.result %[[DEFAULT]] : i32 +! CHECK: } +! CHECK: fir.call @_FortranAFlush(%[[UNIT]]) fastmath : (i32) -> () +! CHECK: return +subroutine flush_optional(unit) + integer, optional :: unit + call flush(unit) ! flush with dynamically optional argument +end subroutine diff --git a/flang/test/Lower/OpenMP/compiler-directives-loop.f90 b/flang/test/Lower/OpenMP/compiler-directives-loop.f90 new file mode 100644 index 0000000000000..916b5a9fbd57f --- /dev/null +++ b/flang/test/Lower/OpenMP/compiler-directives-loop.f90 @@ -0,0 +1,31 @@ +!RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s + +! Check that we generate proper body of the do-construct. 
+ +!CHECK: omp.loop_nest (%[[ARG1:arg[0-9]+]]) : i32 = (%c1_i32) to (%c10_i32) inclusive step (%c1_i32_1) { +!CHECK: %[[V0:[0-9]+]]:2 = hlfir.declare %arg0 {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: hlfir.assign %[[ARG1]] to %[[V0]]#0 : i32, !fir.ref +!CHECK: %[[V1:[0-9]+]] = fir.load %[[V0]]#0 : !fir.ref +!CHECK: %[[V2:[0-9]+]] = fir.convert %[[V1]] : (i32) -> f32 +!CHECK: %[[V3:[0-9]+]] = fir.load %[[V0]]#0 : !fir.ref +!CHECK: %[[V4:[0-9]+]] = fir.convert %[[V3]] : (i32) -> i64 +!CHECK: %[[V5:[0-9]+]] = hlfir.designate %3#0 (%[[V4]]) : (!fir.ref>, i64) -> !fir.ref +!CHECK: hlfir.assign %[[V2]] to %[[V5]] : f32, !fir.ref +!CHECK: omp.yield +!CHECK: } + +program omp_cdir_codegen + implicit none + integer, parameter :: n = 10 + real :: a(n) + integer :: i + +!$omp parallel do +!dir$ unroll + do i = 1, n + a(i) = real(i) + end do +!$omp end parallel do + + print *, 'a(1)=', a(1), ' a(n)=', a(n) +end program omp_cdir_codegen diff --git a/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 b/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 index 4b6a643f94059..4c7b6ac5f5f9b 100644 --- a/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 +++ b/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 @@ -22,7 +22,7 @@ subroutine red_and_delayed_private ! CHECK-SAME: @[[PRIVATIZER_SYM:.*]] : i32 ! CHECK-LABEL: omp.declare_reduction -! CHECK-SAME: @[[REDUCTION_SYM:.*]] : !fir.ref alloc +! CHECK-SAME: @[[REDUCTION_SYM:.*]] : !fir.ref attributes {byref_element_type = i32} alloc ! CHECK-LABEL: _QPred_and_delayed_private ! CHECK: omp.parallel diff --git a/flang/test/Lower/OpenMP/omp-declare-reduction-combsub.f90 b/flang/test/Lower/OpenMP/omp-declare-reduction-combsub.f90 new file mode 100644 index 0000000000000..098b3f84aa2f3 --- /dev/null +++ b/flang/test/Lower/OpenMP/omp-declare-reduction-combsub.f90 @@ -0,0 +1,60 @@ +! 
This test checks lowering of OpenMP declare reduction Directive, with combiner +! via a subroutine call. + +!RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=52 %s -o - | FileCheck %s + +subroutine combine_me(out, in) + integer out, in + out = out + in +end subroutine combine_me + +function func(x, n) + integer func + integer x(n) + integer res + interface + subroutine combine_me(out, in) + integer out, in + end subroutine combine_me + end interface +!CHECK: omp.declare_reduction @red_add : i32 init { +!CHECK: ^bb0(%[[OMP_ORIG_ARG_I:.*]]: i32): +!CHECK: %[[OMP_PRIV:.*]] = fir.alloca i32 +!CHECK: %[[OMP_ORIG:.*]] = fir.alloca i32 +!CHECK: fir.store %[[OMP_ORIG_ARG_I]] to %[[OMP_ORIG]] : !fir.ref +!CHECK: %[[OMP_ORIG_DECL:.*]]:2 = hlfir.declare %[[OMP_ORIG]] {uniq_name = "omp_orig"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: fir.store %[[OMP_ORIG_ARG_I]] to %[[OMP_PRIV]] : !fir.ref +!CHECK: %[[OMP_PRIV_DECL:.*]]:2 = hlfir.declare %[[OMP_PRIV]] {uniq_name = "omp_priv"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: %[[CONST_0:.*]] = arith.constant 0 : i32 +!CHECK: omp.yield(%[[CONST_0]] : i32) +!CHECK: } combiner { +!CHECK: ^bb0(%[[LHS_ARG:.*]]: i32, %[[RHS_ARG:.*]]: i32): +!CHECK: %[[OMP_OUT:.*]] = fir.alloca i32 +!CHECK: %[[OMP_IN:.*]] = fir.alloca i32 +!CHECK: fir.store %[[RHS_ARG]] to %[[OMP_IN]] : !fir.ref +!CHECK: %[[OMP_IN_DECL:.*]]:2 = hlfir.declare %[[OMP_IN]] {uniq_name = "omp_in"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: fir.store %[[LHS_ARG]] to %[[OMP_OUT]] : !fir.ref +!CHECK: %[[OMP_OUT_DECL:.*]]:2 = hlfir.declare %[[OMP_OUT]] {uniq_name = "omp_out"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: fir.call @_QPcombine_me(%[[OMP_OUT_DECL]]#0, %[[OMP_IN_DECL]]#0) fastmath : (!fir.ref, !fir.ref) -> () +!CHECK: %[[OMP_OUT_VAL:.*]] = fir.load %[[OMP_OUT_DECL]]#0 : !fir.ref +!CHECK: omp.yield(%[[OMP_OUT_VAL]] : i32) +!CHECK: } +!CHECK: func.func @_QPcombine_me(%[[OUT:.*]]: !fir.ref {fir.bindc_name = "out"}, %[[IN:.*]]: !fir.ref {fir.bindc_name = 
"in"}) { +!CHECK: %[[SCOPE:.*]] = fir.dummy_scope : !fir.dscope +!CHECK: %[[IN_DECL:.*]]:2 = hlfir.declare %[[IN]] dummy_scope %[[SCOPE]] arg 2 {uniq_name = "_QFcombine_meEin"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) +!CHECK: %[[OUT_DECL:.*]]:2 = hlfir.declare %[[OUT]] dummy_scope %[[SCOPE]] arg 1 {uniq_name = "_QFcombine_meEout"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) +!CHECK: %[[OUT_VAL:.*]] = fir.load %[[OUT_DECL]]#0 : !fir.ref +!CHECK: %[[IN_VAL:.*]] = fir.load %[[IN_DECL]]#0 : !fir.ref +!CHECK: %[[SUM:.*]] = arith.addi %[[OUT_VAL]], %[[IN_VAL]] : i32 +!CHECK: hlfir.assign %[[SUM]] to %[[OUT_DECL]]#0 : i32, !fir.ref +!CHECK: return +!CHECK: } +!$omp declare reduction(red_add:integer(4):combine_me(omp_out,omp_in)) initializer(omp_priv=0) + res=0 +!$omp simd reduction(red_add:res) + do i=1,n + res=res+x(i) + enddo + func=res +end function func + diff --git a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 index 41c7d69ebb3ba..f56875dcb518b 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 @@ -18,7 +18,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_Uxi32 : !fir.ref>>> attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_10]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 index aa91e1e0e8b15..d9ba3bed464f8 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 @@ -12,7 +12,7 @@ program reduce end program -! 
CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x2xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_15:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_15]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array.f90 index 59595de338d50..636660f279e85 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array.f90 @@ -17,7 +17,7 @@ program reduce print *,i end program -! CPU-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> alloc { +! CPU-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> attributes {byref_element_type = !fir.array<3xi32>} alloc { ! CPU: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CPU: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CPU-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 index 14338c6f50817..9cf8a63427ed1 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 @@ -13,7 +13,7 @@ program reduce print *,i end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 index 36344458d1cae..3de2ba8f61f8e 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 @@ -19,7 +19,7 @@ program reduce end program -! 
CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_Uxi32 : !fir.ref>>> attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction3.f90 b/flang/test/Lower/OpenMP/parallel-reduction3.f90 index 6ff7f96b2b9bf..7437e1d35a624 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction3.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction3.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 b/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 index bd91fa51a6988..779322712dbfe 100644 --- a/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 +++ b/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 @@ -9,7 +9,7 @@ subroutine max_array_reduction(l, r) !$omp end parallel end subroutine -! CHECK-LABEL: omp.declare_reduction @max_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @max_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! 
CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/sections-array-reduction.f90 b/flang/test/Lower/OpenMP/sections-array-reduction.f90 index 1d286008a11f3..57e46c7bc8cae 100644 --- a/flang/test/Lower/OpenMP/sections-array-reduction.f90 +++ b/flang/test/Lower/OpenMP/sections-array-reduction.f90 @@ -14,7 +14,7 @@ subroutine sectionsReduction(x) end subroutine -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> {{.*}} alloc { ! [...] ! CHECK: omp.yield ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 b/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 index 18a4f75b86309..3a63bb09c59de 100644 --- a/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 +++ b/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp -fopenmp-version=50 -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 -o - %s 2>&1 | FileCheck %s -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> {{.*}} alloc { ! [...] ! CHECK: omp.yield ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 index 2cd953de0dffa..ed81577ecce16 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 @@ -32,7 +32,7 @@ program reduce15 print *,"min: ", mins end program -! CHECK-LABEL: omp.declare_reduction @min_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @min_byref_box_heap_Uxi32 : !fir.ref>>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! 
CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { @@ -93,7 +93,7 @@ program reduce15 ! CHECK: omp.yield ! CHECK: } -! CHECK-LABEL: omp.declare_reduction @max_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @max_byref_box_heap_Uxi32 : !fir.ref>>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 index 663851cba46c6..d8c0a36db126e 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 @@ -18,7 +18,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_i32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_i32 : !fir.ref>> attributes {byref_element_type = i32} alloc { ! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_2]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 index 7184b3b102fd8..7ce1be03682b4 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 @@ -22,7 +22,7 @@ subroutine reduce(r) end subroutine end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf64 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf64 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! 
CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 index 2233a74600948..ec448cf20f111 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 @@ -11,7 +11,7 @@ program reduce !$omp end parallel do end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: } combiner { ! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>>, %[[ARG1:.*]]: !fir.ref>>): ! CHECK: %[[ARR0:.*]] = fir.load %[[ARG0]] : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 index 211bde19da8db..9da05a290ec21 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 @@ -19,7 +19,7 @@ subroutine sub(a, lb, ub) end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: } combiner { ! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>>, %[[ARG1:.*]]: !fir.ref>>): ! CHECK: %[[ARR0:.*]] = fir.load %[[ARG0]] : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 index afaeba27c5eae..14b657c8e180d 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 @@ -14,7 +14,7 @@ program reduce print *,r end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> attributes {byref_element_type = !fir.array<2xi32>} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! 
CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 index 25b2e97a1b7f7..d0a0c38e4ccb1 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 @@ -14,7 +14,7 @@ program reduce print *,r end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 index edd2bcb1d6be8..60a162d8f8002 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 @@ -24,7 +24,7 @@ program main endprogram -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x3xf64 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x3xf64 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 index 27b726376fbeb..f640f5caddf76 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 @@ -18,7 +18,7 @@ program reduce_pointer deallocate(v) end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_i32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_i32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! 
CHECK-LABEL: } init { diff --git a/flang/test/Lower/do_concurrent_reduce_allocatable.f90 b/flang/test/Lower/do_concurrent_reduce_allocatable.f90 index 873fd10dd1b97..4fb67c094b594 100644 --- a/flang/test/Lower/do_concurrent_reduce_allocatable.f90 +++ b/flang/test/Lower/do_concurrent_reduce_allocatable.f90 @@ -8,7 +8,7 @@ subroutine do_concurrent_allocatable end do end subroutine -! CHECK: fir.declare_reduction @[[RED_OP:.*]] : ![[RED_TYPE:.*]] alloc { +! CHECK: fir.declare_reduction @[[RED_OP:.*]] : ![[RED_TYPE:.*]] attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[ALLOC:.*]] = fir.alloca ! CHECK: fir.yield(%[[ALLOC]] : ![[RED_TYPE]]) ! CHECK: } init { diff --git a/flang/test/Parser/OpenMP/allocate-align-tree.f90 b/flang/test/Parser/OpenMP/allocate-align-tree.f90 index d799aa10a82ff..e440d23904693 100644 --- a/flang/test/Parser/OpenMP/allocate-align-tree.f90 +++ b/flang/test/Parser/OpenMP/allocate-align-tree.f90 @@ -28,7 +28,7 @@ end program allocate_align_tree !CHECK-NEXT: | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'j' !CHECK-NEXT: | | OmpClauseList -> OmpClause -> Align -> OmpAlignClause -> Scalar -> Integer -> Constant -> Expr = '16_4' !CHECK-NEXT: | | | LiteralConstant -> IntLiteralConstant = '16' -!CHECK-NEXT: | | Flags = None +!CHECK-NEXT: | | Flags = {} !CHECK-NEXT: | Block !CHECK-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective !CHECK-NEXT: | | | OmpBeginDirective @@ -38,7 +38,7 @@ end program allocate_align_tree !CHECK-NEXT: | | | | | LiteralConstant -> IntLiteralConstant = '32' !CHECK-NEXT: | | | | OmpClause -> Allocator -> Scalar -> Integer -> Expr = '2_8' !CHECK-NEXT: | | | | | Designator -> DataRef -> Name = 'omp_large_cap_mem_alloc' -!CHECK-NEXT: | | | | Flags = None +!CHECK-NEXT: | | | | Flags = {} !CHECK-NEXT: | | | Block !CHECK-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt diff --git 
a/flang/test/Parser/OpenMP/allocate-tree-spec-part.f90 b/flang/test/Parser/OpenMP/allocate-tree-spec-part.f90 index 800e4a57d5f0e..92ddbbdce05c5 100644 --- a/flang/test/Parser/OpenMP/allocate-tree-spec-part.f90 +++ b/flang/test/Parser/OpenMP/allocate-tree-spec-part.f90 @@ -23,7 +23,7 @@ end program allocate_tree !CHECK-NEXT: | | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'f' !CHECK-NEXT: | | | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '1_8' !CHECK-NEXT: | | | | | Designator -> DataRef -> Name = 'omp_default_mem_alloc' -!CHECK-NEXT: | | | | Flags = None +!CHECK-NEXT: | | | | Flags = {} !CHECK-NEXT: | | | Block !CHECK-NEXT: | ExecutionPart -> Block !CHECK-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'f=2_4' @@ -37,7 +37,7 @@ end program allocate_tree !CHECK-NEXT: | | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'w' !CHECK-NEXT: | | | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '3_8' !CHECK-NEXT: | | | | | Designator -> DataRef -> Name = 'omp_const_mem_alloc' -!CHECK-NEXT: | | | | Flags = None +!CHECK-NEXT: | | | | Flags = {} !CHECK-NEXT: | | | Block !CHECK-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective !CHECK-NEXT: | | | | | OmpBeginDirective @@ -45,7 +45,7 @@ end program allocate_tree !CHECK-NEXT: | | | | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'xarray' !CHECK-NEXT: | | | | | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '2_8' !CHECK-NEXT: | | | | | | | Designator -> DataRef -> Name = 'omp_large_cap_mem_alloc' -!CHECK-NEXT: | | | | | | Flags = None +!CHECK-NEXT: | | | | | | Flags = {} !CHECK-NEXT: | | | | | Block !CHECK-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> 
OmpAllocateDirective !CHECK-NEXT: | | | | | | | OmpBeginDirective @@ -53,12 +53,12 @@ end program allocate_tree !CHECK-NEXT: | | | | | | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'zarray' !CHECK-NEXT: | | | | | | | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '1_8' !CHECK-NEXT: | | | | | | | | | Designator -> DataRef -> Name = 'omp_default_mem_alloc' -!CHECK-NEXT: | | | | | | | | Flags = None +!CHECK-NEXT: | | | | | | | | Flags = {} !CHECK-NEXT: | | | | | | | Block !CHECK-NEXT: | | | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective !CHECK-NEXT: | | | | | | | | | OmpBeginDirective !CHECK-NEXT: | | | | | | | | | | OmpDirectiveName -> llvm::omp::Directive = allocate !CHECK-NEXT: | | | | | | | | | | OmpClauseList -> -!CHECK-NEXT: | | | | | | | | | | Flags = None +!CHECK-NEXT: | | | | | | | | | | Flags = {} !CHECK-NEXT: | | | | | | | | | Block !CHECK-NEXT: | | | | | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt diff --git a/flang/test/Parser/OpenMP/allocate-tree.f90 b/flang/test/Parser/OpenMP/allocate-tree.f90 index 021d8104a7e62..17ffb76aeed96 100644 --- a/flang/test/Parser/OpenMP/allocate-tree.f90 +++ b/flang/test/Parser/OpenMP/allocate-tree.f90 @@ -24,7 +24,7 @@ end program allocate_tree !CHECK-NEXT: | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'w' !CHECK-NEXT: | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '3_8' !CHECK-NEXT: | | | Designator -> DataRef -> Name = 'omp_const_mem_alloc' -!CHECK-NEXT: | | Flags = None +!CHECK-NEXT: | | Flags = {} !CHECK-NEXT: | Block !CHECK: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective @@ -33,7 +33,7 @@ end program allocate_tree !CHECK-NEXT: | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'xarray' 
!CHECK-NEXT: | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '2_8' !CHECK-NEXT: | | | Designator -> DataRef -> Name = 'omp_large_cap_mem_alloc' -!CHECK-NEXT: | | Flags = None +!CHECK-NEXT: | | Flags = {} !CHECK-NEXT: | Block !CHECK-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective !CHECK-NEXT: | | | OmpBeginDirective @@ -41,13 +41,13 @@ end program allocate_tree !CHECK-NEXT: | | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'zarray' !CHECK-NEXT: | | | | OmpClauseList -> OmpClause -> Allocator -> Scalar -> Integer -> Expr = '1_8' !CHECK-NEXT: | | | | | Designator -> DataRef -> Name = 'omp_default_mem_alloc' -!CHECK-NEXT: | | | | Flags = None +!CHECK-NEXT: | | | | Flags = {} !CHECK-NEXT: | | | Block !CHECK-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpAllocateDirective !CHECK-NEXT: | | | | | OmpBeginDirective !CHECK-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = allocate !CHECK-NEXT: | | | | | | OmpClauseList -> -!CHECK-NEXT: | | | | | | Flags = None +!CHECK-NEXT: | | | | | | Flags = {} !CHECK-NEXT: | | | | | Block !CHECK-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt diff --git a/flang/test/Parser/OpenMP/allocators-unparse.f90 b/flang/test/Parser/OpenMP/allocators-unparse.f90 index 079d6acf114d5..31c7ed59fcc19 100644 --- a/flang/test/Parser/OpenMP/allocators-unparse.f90 +++ b/flang/test/Parser/OpenMP/allocators-unparse.f90 @@ -33,7 +33,7 @@ end subroutine allocate !PARSE-TREE-NEXT: | | OmpClauseList -> OmpClause -> Allocate -> OmpAllocateClause !PARSE-TREE-NEXT: | | | Modifier -> OmpAllocatorSimpleModifier -> Scalar -> Integer -> Expr -> Designator -> DataRef -> Name = 'omp_default_mem_alloc' !PARSE-TREE-NEXT: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'arr1' -!PARSE-TREE-NEXT: | | Flags = None +!PARSE-TREE-NEXT: | | 
Flags = {} !PARSE-TREE-NEXT: | Block !PARSE-TREE-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt !PARSE-TREE-NEXT: | | | Allocation @@ -49,7 +49,7 @@ end subroutine allocate !PARSE-TREE-NEXT: | | OmpClause -> Allocate -> OmpAllocateClause !PARSE-TREE-NEXT: | | | Modifier -> OmpAllocatorSimpleModifier -> Scalar -> Integer -> Expr -> Designator -> DataRef -> Name = 'omp_default_mem_alloc' !PARSE-TREE-NEXT: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'arr2' -!PARSE-TREE-NEXT: | | Flags = None +!PARSE-TREE-NEXT: | | Flags = {} !PARSE-TREE-NEXT: | Block !PARSE-TREE-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt !PARSE-TREE-NEXT: | | | Allocation @@ -61,7 +61,7 @@ end subroutine allocate !PARSE-TREE-NEXT: | | OmpClauseList -> OmpClause -> Allocate -> OmpAllocateClause !PARSE-TREE-NEXT: | | | Modifier -> OmpAlignModifier -> Scalar -> Integer -> Expr -> LiteralConstant -> IntLiteralConstant = '32' !PARSE-TREE-NEXT: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'arr2' -!PARSE-TREE-NEXT: | | Flags = None +!PARSE-TREE-NEXT: | | Flags = {} !PARSE-TREE-NEXT: | Block !PARSE-TREE-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AllocateStmt !PARSE-TREE-NEXT: | | | Allocation @@ -73,4 +73,4 @@ end subroutine allocate !PARSE-TREE-NEXT: | OmpEndDirective !PARSE-TREE-NEXT: | | OmpDirectiveName -> llvm::omp::Directive = allocators !PARSE-TREE-NEXT: | | OmpClauseList -> -!PARSE-TREE-NEXT: | | Flags = None +!PARSE-TREE-NEXT: | | Flags = {} diff --git a/flang/test/Parser/OpenMP/assumption.f90 b/flang/test/Parser/OpenMP/assumption.f90 index 86cbad9e42f78..fd5cfab6253c2 100644 --- a/flang/test/Parser/OpenMP/assumption.f90 +++ b/flang/test/Parser/OpenMP/assumption.f90 @@ -43,39 +43,39 @@ end subroutine sub1 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> 
OmpClause -> NoOpenmp -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPAssumeConstruct !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> OmpClause -> NoParallelism -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPAssumeConstruct !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> OmpClause -> NoOpenmpRoutines -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPAssumeConstruct !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> OmpClause -> Absent -> OmpAbsentClause -> llvm::omp::Directive = allocate !PARSE-TREE: | | OmpClause -> Contains -> OmpContainsClause -> llvm::omp::Directive = workshare !PARSE-TREE: | | llvm::omp::Directive = task -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> BlockConstruct !PARSE-TREE: 
| | | BlockStmt -> @@ -89,7 +89,7 @@ end subroutine sub1 !PARSE-TREE: | | OmpClauseList -> OmpClause -> Holds -> OmpHoldsClause -> Expr -> EQ !PARSE-TREE: | | | Expr -> LiteralConstant -> IntLiteralConstant = '1' !PARSE-TREE: | | | Expr -> LiteralConstant -> IntLiteralConstant = '1' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> BlockConstruct !PARSE-TREE: | | | BlockStmt -> @@ -124,7 +124,7 @@ end subroutine sub2 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> OmpClause -> NoOpenmp -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt !PARSE-TREE: | | | Variable -> Designator -> DataRef -> Name = 'r' @@ -134,7 +134,7 @@ end subroutine sub2 !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = assume !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} program p !$omp assumes no_openmp @@ -147,5 +147,5 @@ end program p !PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclarativeAssumes -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = assumes !PARSE-TREE: | OmpClauseList -> OmpClause -> NoOpenmp -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !PARSE-TREE: ImplicitPart -> diff --git a/flang/test/Parser/OpenMP/atomic-compare.f90 b/flang/test/Parser/OpenMP/atomic-compare.f90 index 9b9c4f02df9c1..7e80b9c8505e5 100644 --- a/flang/test/Parser/OpenMP/atomic-compare.f90 +++ b/flang/test/Parser/OpenMP/atomic-compare.f90 @@ -20,7 +20,7 @@ subroutine f00(a, b) !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None 
+!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> IfStmt !PARSE-TREE: | | | Scalar -> Logical -> Expr = 'x llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> IfConstruct !PARSE-TREE: | | | IfThenStmt @@ -112,7 +112,7 @@ subroutine f02(a, b) !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> IfConstruct !PARSE-TREE: | | | IfThenStmt @@ -150,7 +150,7 @@ subroutine g00(a, b) !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Capture !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'v=x' !PARSE-TREE: | | | Variable = 'v' @@ -172,7 +172,7 @@ subroutine g00(a, b) !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} subroutine g01(a, b) integer :: a, b @@ -202,7 +202,7 @@ subroutine g01(a, b) !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Capture !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'v=x' !PARSE-TREE: | | | Variable = 'v' @@ -227,7 +227,7 @@ subroutine g01(a, b) 
!PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} subroutine g02(a, b) integer :: a, b @@ -259,7 +259,7 @@ subroutine g02(a, b) !PARSE-TREE: | | OmpClauseList -> OmpClause -> Update -> !PARSE-TREE: | | OmpClause -> Capture !PARSE-TREE: | | OmpClause -> Compare -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> IfConstruct !PARSE-TREE: | | | IfThenStmt @@ -287,4 +287,4 @@ subroutine g02(a, b) !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} diff --git a/flang/test/Parser/OpenMP/atomic-end.f90 b/flang/test/Parser/OpenMP/atomic-end.f90 index b971bb6f3d1da..fd1f44426283b 100644 --- a/flang/test/Parser/OpenMP/atomic-end.f90 +++ b/flang/test/Parser/OpenMP/atomic-end.f90 @@ -19,7 +19,7 @@ subroutine f00 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> OmpClause -> Read -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'v=x' !PARSE-TREE: | | | Variable = 'v' @@ -29,7 +29,7 @@ subroutine f00 !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} subroutine f01 @@ -50,7 +50,7 @@ subroutine f01 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> OmpClause -> Read -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | 
ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'v=x' !PARSE-TREE: | | | Variable = 'v' @@ -60,4 +60,4 @@ subroutine f01 !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} diff --git a/flang/test/Parser/OpenMP/atomic-label-do.f90 b/flang/test/Parser/OpenMP/atomic-label-do.f90 index 06197587b2d19..f0c83c01f7a21 100644 --- a/flang/test/Parser/OpenMP/atomic-label-do.f90 +++ b/flang/test/Parser/OpenMP/atomic-label-do.f90 @@ -29,7 +29,7 @@ subroutine f !PARSE-TREE: | | | OmpBeginDirective !PARSE-TREE: | | | | OmpDirectiveName -> llvm::omp::Directive = atomic !PARSE-TREE: | | | | OmpClauseList -> OmpClause -> Write -!PARSE-TREE: | | | | Flags = None +!PARSE-TREE: | | | | Flags = {} !PARSE-TREE: | | | Block !PARSE-TREE: | | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'x=i' !PARSE-TREE: | | | | | Variable = 'x' diff --git a/flang/test/Parser/OpenMP/bind-clause.f90 b/flang/test/Parser/OpenMP/bind-clause.f90 index 6910ffbba204f..af89719c04e6d 100644 --- a/flang/test/Parser/OpenMP/bind-clause.f90 +++ b/flang/test/Parser/OpenMP/bind-clause.f90 @@ -21,6 +21,6 @@ subroutine f00 !PARSE-TREE: | OmpBeginLoopDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = loop !PARSE-TREE: | | OmpClauseList -> OmpClause -> Bind -> OmpBindClause -> Binding = Parallel -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct diff --git a/flang/test/Parser/OpenMP/construct-prefix-conflict.f90 b/flang/test/Parser/OpenMP/construct-prefix-conflict.f90 index 4573a83c8e358..d344f9afc90cc 100644 --- a/flang/test/Parser/OpenMP/construct-prefix-conflict.f90 +++ b/flang/test/Parser/OpenMP/construct-prefix-conflict.f90 @@ -79,7 +79,7 @@ subroutine f01(x) !PARSE-TREE: | | | OmpClauseList -> OmpClause -> 
Map -> OmpMapClause !PARSE-TREE: | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | | bool = 'true' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt !PARSE-TREE: | | | Variable -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | Expr -> Add @@ -118,7 +118,7 @@ subroutine f02(x) !PARSE-TREE: | | | OmpClauseList -> OmpClause -> Map -> OmpMapClause !PARSE-TREE: | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | | bool = 'true' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt !PARSE-TREE: | | | Variable -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | Expr -> Add @@ -157,7 +157,7 @@ subroutine f03(x) !PARSE-TREE: | | | OmpClauseList -> OmpClause -> To -> OmpToClause !PARSE-TREE: | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | | bool = 'true' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt !PARSE-TREE: | | | Variable -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | Expr -> Add diff --git a/flang/test/Parser/OpenMP/cross-label-do.f90 b/flang/test/Parser/OpenMP/cross-label-do.f90 index 52ac264756dfc..fd648e0248258 100644 --- a/flang/test/Parser/OpenMP/cross-label-do.f90 +++ b/flang/test/Parser/OpenMP/cross-label-do.f90 @@ -32,7 +32,7 @@ subroutine f00 !PARSE-TREE: | | | OmpBeginLoopDirective !PARSE-TREE: | | | | OmpDirectiveName -> llvm::omp::Directive = do !PARSE-TREE: | | | | OmpClauseList -> -!PARSE-TREE: | | | | Flags = None +!PARSE-TREE: | | | | Flags = {} !PARSE-TREE: | | | Block !PARSE-TREE: | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !PARSE-TREE: | | | | | 
NonLabelDoStmt diff --git a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 index f8104254aa6b1..7e462e0265800 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 @@ -63,7 +63,7 @@ program omp_examples !PARSE-TREE: | | | | | Name = 'r' !PARSE-TREE: | | | Expr = '0_4' !PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare reduction(*:tt:omp_out%r = omp_out%r * omp_in%r) initializer(omp_priv%r = 1) !CHECK-NEXT: !$OMP DECLARE REDUCTION(*:tt: omp_out%r = omp_out%r * omp_in%r) INITIALIZER(om& @@ -103,7 +103,7 @@ program omp_examples !PARSE-TREE: | | | | | Name = 'r' !PARSE-TREE: | | | Expr = '1_4' !PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '1' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare reduction(max:tt:omp_out = mymax(omp_out, omp_in)) initializer(omp_priv%r = 0) !CHECK-NEXT: !$OMP DECLARE REDUCTION(max:tt: omp_out = mymax(omp_out, omp_in)) INITIALIZER(& @@ -140,7 +140,7 @@ program omp_examples !PARSE-TREE: | | | | | Name = 'r' !PARSE-TREE: | | | Expr = '0_4' !PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare reduction(min:tt:omp_out%r = min(omp_out%r, omp_in%r)) initializer(omp_priv%r = 1) !CHECK-NEXT: !$OMP DECLARE REDUCTION(min:tt: omp_out%r = min(omp_out%r, omp_in%r)) INITIALI& @@ -183,7 +183,7 @@ program omp_examples !PARSE-TREE: | | | | | Name = 'r' !PARSE-TREE: | | | Expr = '1_4' !PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '1' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} call random_number(values%r) @@ -197,7 +197,7 @@ program omp_examples !PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause !PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> DefinedOperator -> 
IntrinsicOperator = Add !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'sum' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct @@ -215,7 +215,7 @@ program omp_examples !PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause !PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'prod' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct @@ -233,7 +233,7 @@ program omp_examples !PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause !PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'big' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct @@ -251,7 +251,7 @@ program omp_examples !PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause !PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'small' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct diff --git a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 index 0d337c1ef42f3..1099daf9de06f 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 @@ -73,7 +73,7 @@ subroutine reduce_1 ( n, 
tts ) !PARSE-TREE: | | | | | ComponentSpec !PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' !PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare reduction(+ : tt : omp_out = tt(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt(0,0)) @@ -134,7 +134,7 @@ subroutine reduce_1 ( n, tts ) !PARSE-TREE: | | | | | ComponentSpec !PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' !PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare reduction(+ :tt2 : omp_out = tt2(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt2(0,0)) type(tt) :: diffp = tt( 0, 0 ) diff --git a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 index 31431f5d20c45..6ca7b0fe79198 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 @@ -52,7 +52,7 @@ end subroutine initme !PARSE-TREE: | | | | ActualArgSpec !PARSE-TREE: | | | | | ActualArg -> Expr = '0_4' !PARSE-TREE: | | | | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} res=init !$omp simd reduction(red_add:res) @@ -69,7 +69,7 @@ end subroutine initme !PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause !PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'red_add' !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'res' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct @@ -119,4 +119,4 @@ end program main !PARSE-TREE: | | | | Designator -> DataRef -> Name = 'omp_priv' !PARSE-TREE: | | | Expr = '0_4' !PARSE-TREE: | | | | 
LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 index 16dc4eb44e6fd..e2645bae3034d 100644 --- a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 +++ b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 @@ -20,7 +20,7 @@ function func1() result(i) !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4' !CHECK-NEXT: | | LiteralConstant -> LogicalLiteralConstant !CHECK-NEXT: | | | bool = 'true' - !CHECK-NEXT: | Flags = None + !CHECK-NEXT: | Flags = {} character(1) :: i i = 'a' return @@ -33,7 +33,7 @@ function func2() result(i) !CHECK-NEXT: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause !CHECK-NEXT: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2' !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> - !CHECK-NEXT: | Flags = None + !CHECK-NEXT: | Flags = {} character(1) :: i i = 'b' return diff --git a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 index 8198f44bcec18..efcdc44e0f64e 100644 --- a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 +++ b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 @@ -18,4 +18,4 @@ module m !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | OmpObject -> Designator -> DataRef -> Name = 'y' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/declare-variant.f90 b/flang/test/Parser/OpenMP/declare-variant.f90 index f5c34abd84ac7..8d8280d89e7e8 100644 --- a/flang/test/Parser/OpenMP/declare-variant.f90 +++ b/flang/test/Parser/OpenMP/declare-variant.f90 @@ -13,7 +13,7 @@ subroutine sub0 !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | 
OmpTraitSelector !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = parallel -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare variant (sub:vsub) match (construct={parallel}) contains @@ -43,7 +43,7 @@ subroutine sub (v1) !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | OmpTraitSelector !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = dispatch -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare variant(vsub), match(construct={dispatch}) integer, value :: v1 @@ -75,7 +75,7 @@ subroutine sub (v1) !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = dispatch !PARSE-TREE: | OmpClause -> AppendArgs -> OmpAppendArgsClause -> OmpAppendOp -> OmpInteropType -> Value = Target !PARSE-TREE: | OmpAppendOp -> OmpInteropType -> Value = Target -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare variant(vsub), match(construct={dispatch}), append_args (interop(target), interop(target)) integer, value :: v1 @@ -107,7 +107,7 @@ subroutine sub (v1, v2) !PARSE-TREE: | OmpClause -> AdjustArgs -> OmpAdjustArgsClause !PARSE-TREE: | | OmpAdjustOp -> Value = Need_Device_Ptr !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'v2' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare variant(vsub) match ( construct = { dispatch } ) adjust_args(nothing : v1 ) adjust_args(need_device_ptr : v2) end @@ -143,4 +143,4 @@ subroutine f2 (x, y) !PARSE-TREE: | | | OmpTraitSelectorName -> Value = Simd !PARSE-TREE: | | | Properties !PARSE-TREE: | | | | OmpTraitProperty -> OmpClause -> Uniform -> Name = 'y' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/declare_target-device_type.f90 b/flang/test/Parser/OpenMP/declare_target-device_type.f90 index 7df796288f4d4..a505b9113d819 100644 --- a/flang/test/Parser/OpenMP/declare_target-device_type.f90 +++ 
b/flang/test/Parser/OpenMP/declare_target-device_type.f90 @@ -10,7 +10,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host !PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(host) enter(x) !CHECK: !$omp declare target device_type(nohost) enter(x) @@ -20,7 +20,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost !PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(nohost) enter(x) !CHECK: !$omp declare target device_type(any) enter(x) @@ -30,7 +30,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any !PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(any) enter(x) !CHECK: !$omp declare target device_type(host) to(x) @@ -41,7 +41,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(host) to(x) !CHECK: !$omp declare target device_type(nohost) to(x) @@ -52,7 +52,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 
'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(nohost) to(x) !CHECK: !$omp declare target device_type(any) to(x) @@ -63,7 +63,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(any) to(x) !CHECK: !$omp declare target device_type(host) enter(y) to(x) @@ -76,7 +76,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(host) enter(y) to(x) !CHECK: !$omp declare target device_type(nohost) enter(y) to(x) @@ -89,7 +89,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(nohost) enter(y) to(x) !CHECK: !$omp declare target device_type(any) enter(y) to(x) @@ -102,7 +102,7 @@ subroutine openmp_declare_target !PARSE-TREE: | OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp declare target device_type(any) enter(y) to(x) integer :: a(1024), i diff --git a/flang/test/Parser/OpenMP/dispatch.f90 b/flang/test/Parser/OpenMP/dispatch.f90 index 131b4d1f9ddb6..36f301ce98058 100644 --- a/flang/test/Parser/OpenMP/dispatch.f90 +++ b/flang/test/Parser/OpenMP/dispatch.f90 @@ -33,14 +33,14 @@ subroutine sub(x) !PARSE-TREE: | | | | | LiteralConstant -> IntLiteralConstant = '1' !PARSE-TREE: | | | | Expr 
= '1_4' !PARSE-TREE: | | | | | LiteralConstant -> IntLiteralConstant = '1' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt ![...] !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = dispatch !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !$omp dispatch device(3) nowait nocontext(.false.) novariants(1.eq.1) r = func(a, b, c) @@ -57,7 +57,7 @@ subroutine sub(x) !PARSE-TREE: | | | Scalar -> Integer -> Expr = '3_4' !PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '3' !PARSE-TREE: | | OmpClause -> IsDevicePtr -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt !PARSE-TREE-NOT: OmpEndDirective diff --git a/flang/test/Parser/OpenMP/dyn-groupprivate-clause.f90 b/flang/test/Parser/OpenMP/dyn-groupprivate-clause.f90 index 599821dbe3377..404f69380bfb7 100644 --- a/flang/test/Parser/OpenMP/dyn-groupprivate-clause.f90 +++ b/flang/test/Parser/OpenMP/dyn-groupprivate-clause.f90 @@ -20,7 +20,7 @@ subroutine f00(n) !PARSE-TREE: | OmpClauseList -> OmpClause -> DynGroupprivate -> OmpDynGroupprivateClause !PARSE-TREE: | | Scalar -> Integer -> Expr = 'n' !PARSE-TREE: | | | Designator -> DataRef -> Name = 'n' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f01(n) @@ -43,7 +43,7 @@ subroutine f01(n) !PARSE-TREE: | | Modifier -> OmpFallbackModifier -> Value = Abort !PARSE-TREE: | | Scalar -> Integer -> Expr = 'n' !PARSE-TREE: | | | Designator -> DataRef -> Name = 'n' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f02(n) @@ -67,4 +67,4 @@ subroutine f02(n) !PARSE-TREE: | | Modifier -> OmpAccessGroup -> Value = Cgroup !PARSE-TREE: | | Scalar 
-> Integer -> Expr = 'n' !PARSE-TREE: | | | Designator -> DataRef -> Name = 'n' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 index bc5b5eb3e7ef3..71d804548e552 100644 --- a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 +++ b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 @@ -16,4 +16,4 @@ program automap !PARSE-TREE: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause !PARSE-TREE: | | Modifier -> OmpAutomapModifier -> Value = Automap !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/fuse02.f90 b/flang/test/Parser/OpenMP/fuse02.f90 index cc3de48dd658a..4b1819f3896cf 100644 --- a/flang/test/Parser/OpenMP/fuse02.f90 +++ b/flang/test/Parser/OpenMP/fuse02.f90 @@ -28,13 +28,13 @@ subroutine fuse_on_fuse !CHECK-PARSE-NEXT: | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | Block !CHECK-PARSE-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | NonLabelDoStmt @@ -61,7 +61,7 @@ subroutine fuse_on_fuse !CHECK-PARSE-NEXT: | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse 
!CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | NonLabelDoStmt !CHECK-PARSE-NEXT: | | | | | | LoopControl -> LoopBounds @@ -76,7 +76,7 @@ subroutine fuse_on_fuse !CHECK-PARSE-NEXT: | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-UNPARSE: SUBROUTINE fuse_on_fuse !CHECK-UNPARSE-NEXT: IMPLICIT NONE diff --git a/flang/test/Parser/OpenMP/groupprivate.f90 b/flang/test/Parser/OpenMP/groupprivate.f90 index 8bd840147a2dd..b069eb751b90d 100644 --- a/flang/test/Parser/OpenMP/groupprivate.f90 +++ b/flang/test/Parser/OpenMP/groupprivate.f90 @@ -22,9 +22,9 @@ module m !PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'y' !PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPGroupprivate -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = groupprivate !PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'z' !PARSE-TREE: | OmpClauseList -> -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/in-reduction-clause.f90 b/flang/test/Parser/OpenMP/in-reduction-clause.f90 index 6059fb27d5be3..eb39398c3468a 100644 --- a/flang/test/Parser/OpenMP/in-reduction-clause.f90 +++ 
b/flang/test/Parser/OpenMP/in-reduction-clause.f90 @@ -46,7 +46,7 @@ end subroutine omp_in_reduction_taskgroup !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> InReduction -> OmpInReductionClause !PARSE-TREE-NEXT: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE-NEXT: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'z' -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine omp_in_reduction_parallel() integer :: z @@ -77,5 +77,5 @@ end subroutine omp_in_reduction_parallel !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> InReduction -> OmpInReductionClause !PARSE-TREE-NEXT: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE-NEXT: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'z' -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} diff --git a/flang/test/Parser/OpenMP/interop-construct.f90 b/flang/test/Parser/OpenMP/interop-construct.f90 index 82a1b1195dc3b..c080d477d1325 100644 --- a/flang/test/Parser/OpenMP/interop-construct.f90 +++ b/flang/test/Parser/OpenMP/interop-construct.f90 @@ -21,7 +21,7 @@ END SUBROUTINE test_interop_01 !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = interop !PARSE-TREE: | | | OmpClauseList -> OmpClause -> Device -> OmpDeviceClause !PARSE-TREE: | | | | Scalar -> Integer -> Expr -> LiteralConstant -> IntLiteralConstant = '1' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> PrintStmt !PARSE-TREE: | | | Format -> Star !PARSE-TREE: | | | OutputItem -> Expr -> LiteralConstant -> CharLiteralConstant @@ -64,7 +64,7 @@ END SUBROUTINE test_interop_02 !PARSE-TREE: | | | | OmpObject -> Designator -> DataRef -> Name = 'obj' !PARSE-TREE: | | | OmpClause -> Use -> OmpUseClause -> OmpObject -> Designator -> DataRef -> Name = 'obj1' !PARSE-TREE: | | | OmpClause -> Destroy -> OmpDestroyClause -> OmpObject -> Designator -> DataRef -> 
Name = 'obj3' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> PrintStmt !PARSE-TREE: | | | Format -> Star !PARSE-TREE: | | | OutputItem -> Expr -> LiteralConstant -> CharLiteralConstant @@ -104,7 +104,7 @@ END SUBROUTINE test_interop_03 !PARSE-TREE: | | | OmpClause -> Depend -> OmpDependClause -> TaskDep !PARSE-TREE: | | | | Modifier -> OmpTaskDependenceType -> Value = Inout !PARSE-TREE: | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'obj' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> PrintStmt !PARSE-TREE: | | | Format -> Star !PARSE-TREE: | | | OutputItem -> Expr -> LiteralConstant -> CharLiteralConstant @@ -159,7 +159,7 @@ END SUBROUTINE test_interop_04 !PARSE-TREE: | | | | Modifier -> OmpTaskDependenceType -> Value = Inout !PARSE-TREE: | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'arr' !PARSE-TREE: | | | OmpClause -> Nowait -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> PrintStmt !PARSE-TREE: | | | Format -> Star !PARSE-TREE: | | | OutputItem -> Expr -> LiteralConstant -> CharLiteralConstant @@ -200,7 +200,7 @@ END SUBROUTINE test_interop_05 !PARSE-TREE: | | | OmpClause -> Device -> OmpDeviceClause !PARSE-TREE: | | | | Modifier -> OmpDeviceModifier -> Value = Device_Num !PARSE-TREE: | | | | Scalar -> Integer -> Expr -> LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> PrintStmt !PARSE-TREE: | | | Format -> Star !PARSE-TREE: | | | OutputItem -> Expr -> LiteralConstant -> CharLiteralConstant diff --git a/flang/test/Parser/OpenMP/linear-clause.f90 b/flang/test/Parser/OpenMP/linear-clause.f90 
index b53dfe5f941a3..fb02f251fc300 100644 --- a/flang/test/Parser/OpenMP/linear-clause.f90 +++ b/flang/test/Parser/OpenMP/linear-clause.f90 @@ -22,7 +22,7 @@ subroutine f00(x) !PARSE-TREE: | OmpClauseList -> OmpClause -> Linear -> OmpLinearClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !PARSE-TREE: DoConstruct subroutine f01(x) @@ -48,7 +48,7 @@ subroutine f01(x) !PARSE-TREE: | | Modifier -> OmpStepSimpleModifier -> Scalar -> Integer -> Expr = '2_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '2' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !PARSE-TREE: DoConstruct subroutine f02(x) @@ -74,7 +74,7 @@ subroutine f02(x) !PARSE-TREE: | | Modifier -> OmpStepComplexModifier -> Scalar -> Integer -> Expr = '3_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '3' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !PARSE-TREE: DoConstruct subroutine f03(x) @@ -93,7 +93,7 @@ subroutine f03(x) !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | Modifier -> OmpLinearModifier -> Value = Uval !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f04(x) integer :: x @@ -113,4 +113,4 @@ subroutine f04(x) !PARSE-TREE: | | Modifier -> OmpStepComplexModifier -> Scalar -> Integer -> Expr = '3_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '3' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/loop-transformation-construct01.f90 b/flang/test/Parser/OpenMP/loop-transformation-construct01.f90 index 979dd0c57e8b5..16154b3bfdf53 100644 --- a/flang/test/Parser/OpenMP/loop-transformation-construct01.f90 +++ b/flang/test/Parser/OpenMP/loop-transformation-construct01.f90 @@ -23,14 
+23,14 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = unroll !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> OmpClause -> Partial -> Scalar -> Integer -> Constant -> Expr = '1_4' !CHECK-PARSE-NEXT: | | | | | | | LiteralConstant -> IntLiteralConstant = '1' -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | Block !CHECK-PARSE-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | NonLabelDoStmt @@ -60,11 +60,11 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = unroll !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-UNPARSE: SUBROUTINE loop_transformation_construct !CHECK-UNPARSE-NEXT: IMPLICIT NONE diff --git a/flang/test/Parser/OpenMP/loop-transformation-construct02.f90 b/flang/test/Parser/OpenMP/loop-transformation-construct02.f90 index 814a885f14a18..52a78112b3dc4 100644 --- a/flang/test/Parser/OpenMP/loop-transformation-construct02.f90 +++ 
b/flang/test/Parser/OpenMP/loop-transformation-construct02.f90 @@ -25,21 +25,21 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = unroll !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> OmpClause -> Partial -> Scalar -> Integer -> Constant -> Expr = '1_4' !CHECK-PARSE-NEXT: | | | | | | | LiteralConstant -> IntLiteralConstant = '1' -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | Block !CHECK-PARSE-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | | | OmpDirectiveName -> llvm::omp::Directive = tile !CHECK-PARSE-NEXT: | | | | | | | | OmpClauseList -> OmpClause -> Sizes -> Scalar -> Integer -> Expr = '2_4' !CHECK-PARSE-NEXT: | | | | | | | | | LiteralConstant -> IntLiteralConstant = '2' -!CHECK-PARSE-NEXT: | | | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | | | Block !CHECK-PARSE-NEXT: | | | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | | | NonLabelDoStmt @@ -69,15 +69,15 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | | | OmpDirectiveName -> llvm::omp::Directive = tile !CHECK-PARSE-NEXT: | | | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | | | Flags = None 
+!CHECK-PARSE-NEXT: | | | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = unroll !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-UNPARSE: SUBROUTINE loop_transformation_construct !CHECK-UNPARSE-NEXT: IMPLICIT NONE diff --git a/flang/test/Parser/OpenMP/loop-transformation-construct03.f90 b/flang/test/Parser/OpenMP/loop-transformation-construct03.f90 index e431b6d535ff5..10d87c45b4802 100644 --- a/flang/test/Parser/OpenMP/loop-transformation-construct03.f90 +++ b/flang/test/Parser/OpenMP/loop-transformation-construct03.f90 @@ -25,7 +25,7 @@ subroutine loop_transformation_construct7 !CHECK-PARSE-NEXT: | | | | OmpClauseList -> OmpClause -> Collapse -> Scalar -> Integer -> Constant -> Expr = '2_4' !CHECK-PARSE-NEXT: | | | | | LiteralConstant -> IntLiteralConstant = '2' !CHECK-PARSE-NEXT: | | | | OmpClause -> Private -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'b' -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | NonLabelDoStmt diff --git a/flang/test/Parser/OpenMP/loop-transformation-construct04.f90 b/flang/test/Parser/OpenMP/loop-transformation-construct04.f90 index e37e2bbfe155b..4944347ea5bad 100644 --- a/flang/test/Parser/OpenMP/loop-transformation-construct04.f90 +++ b/flang/test/Parser/OpenMP/loop-transformation-construct04.f90 @@ -25,13 +25,13 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | OmpBeginLoopDirective 
!CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | Block !CHECK-PARSE-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | NonLabelDoStmt @@ -58,11 +58,11 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-UNPARSE: SUBROUTINE loop_transformation_construct !CHECK-UNPARSE-NEXT: IMPLICIT NONE diff --git a/flang/test/Parser/OpenMP/loop-transformation-construct05.f90 b/flang/test/Parser/OpenMP/loop-transformation-construct05.f90 index 6d3303841d506..f26679388346c 100644 --- a/flang/test/Parser/OpenMP/loop-transformation-construct05.f90 +++ b/flang/test/Parser/OpenMP/loop-transformation-construct05.f90 @@ -27,13 +27,13 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> 
-!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-PARSE-NEXT: | | | Block !CHECK-PARSE-NEXT: | | | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct !CHECK-PARSE-NEXT: | | | | | OmpBeginLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | Block !CHECK-PARSE-NEXT: | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | NonLabelDoStmt @@ -51,7 +51,7 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | | | | | | OmpDirectiveName -> llvm::omp::Directive = tile !CHECK-PARSE-NEXT: | | | | | | | | OmpClauseList -> OmpClause -> Sizes -> Scalar -> Integer -> Expr = '2_4' !CHECK-PARSE-NEXT: | | | | | | | | | LiteralConstant -> IntLiteralConstant = '2' -!CHECK-PARSE-NEXT: | | | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | | | | | Block !CHECK-PARSE-NEXT: | | | | | | | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !CHECK-PARSE-NEXT: | | | | | | | | | NonLabelDoStmt @@ -67,11 +67,11 @@ subroutine loop_transformation_construct !CHECK-PARSE-NEXT: | | | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | | | OmpDirectiveName -> llvm::omp::Directive = fuse !CHECK-PARSE-NEXT: | | | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | | | Flags = {} !CHECK-PARSE-NEXT: | | | OmpEndLoopDirective !CHECK-PARSE-NEXT: | | | | OmpDirectiveName -> llvm::omp::Directive = do !CHECK-PARSE-NEXT: | | | | OmpClauseList -> -!CHECK-PARSE-NEXT: | | | | Flags = None +!CHECK-PARSE-NEXT: | | | | Flags = {} !CHECK-UNPARSE: SUBROUTINE loop_transformation_construct !CHECK-UNPARSE-NEXT: IMPLICIT NONE diff --git a/flang/test/Parser/OpenMP/map-modifiers-v61.f90 
b/flang/test/Parser/OpenMP/map-modifiers-v61.f90 index 79bf73a658875..f1e41fb0c5152 100644 --- a/flang/test/Parser/OpenMP/map-modifiers-v61.f90 +++ b/flang/test/Parser/OpenMP/map-modifiers-v61.f90 @@ -19,7 +19,7 @@ subroutine f00(x) !PARSE-TREE: | | Modifier -> OmpAttachModifier -> Value = Always !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f01(x) @@ -40,7 +40,7 @@ subroutine f01(x) !PARSE-TREE: | | Modifier -> OmpAttachModifier -> Value = Auto !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f02(x) @@ -61,4 +61,4 @@ subroutine f02(x) !PARSE-TREE: | | Modifier -> OmpAttachModifier -> Value = Never !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 index b64ceb1a98164..a24027161ef09 100644 --- a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 +++ b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 @@ -164,7 +164,7 @@ subroutine f03 !PARSE-TREE: | | | | | | | | | | DataRef -> Name = 'omp_out' !PARSE-TREE: | | | | | | | | | | Name = 'x' !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} subroutine f04 !$omp metadirective when(user={condition(.true.)}: & diff --git a/flang/test/Parser/OpenMP/metadirective-flush.f90 b/flang/test/Parser/OpenMP/metadirective-flush.f90 index 083791097c67d..e4e521ed07073 100644 --- a/flang/test/Parser/OpenMP/metadirective-flush.f90 +++ b/flang/test/Parser/OpenMP/metadirective-flush.f90 @@ -25,7 +25,7 @@ subroutine f00() !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = flush 
!PARSE-TREE: | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | OmpClauseList -> OmpClause -> SeqCst -!PARSE-TREE: | | | Flags = DeprecatedSyntax +!PARSE-TREE: | | | Flags = {DeprecatedSyntax} subroutine f01() integer :: x @@ -51,4 +51,4 @@ subroutine f01() !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = flush !PARSE-TREE: | | | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | OmpClauseList -> OmpClause -> SeqCst -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} diff --git a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 index 7a627913f9555..9f39066f131cd 100644 --- a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 +++ b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 @@ -38,7 +38,7 @@ subroutine f00 !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = cancellation point !PARSE-TREE: | OmpClauseList -> OmpClause -> CancellationConstructType -> OmpCancellationConstructTypeClause !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = parallel -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f01 type :: t @@ -66,7 +66,7 @@ subroutine f01 !PARSE-TREE: | | | DataRef -> Name = 'v' !PARSE-TREE: | | | Name = 'x' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f02 type :: t @@ -107,7 +107,7 @@ subroutine f02 !PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_in' !PARSE-TREE: | | | | | | | | Name = 'x' !PARSE-TREE: | OmpClauseList -> -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f03 !$omp declare_simd @@ -120,7 +120,7 @@ subroutine f03 !PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare simd !PARSE-TREE: | 
OmpClauseList -> -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f04 !$omp declare_target @@ -133,7 +133,7 @@ subroutine f04 !PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target !PARSE-TREE: | OmpClauseList -> -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f05 implicit none @@ -164,7 +164,7 @@ subroutine g05 !PARSE-TREE: | | | | OmpTraitProperty -> Scalar -> Expr = '.true._4' !PARSE-TREE: | | | | | LiteralConstant -> LogicalLiteralConstant !PARSE-TREE: | | | | | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f06 implicit none @@ -217,7 +217,7 @@ subroutine f07 !PARSE-TREE: | | Modifier -> OmpMapType -> Value = To !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'i' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f08 implicit none @@ -237,7 +237,7 @@ subroutine f08 !PARSE-TREE: | | Modifier -> OmpMapType -> Value = From !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'i' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f09 implicit none @@ -256,4 +256,4 @@ subroutine f09 !PARSE-TREE: | OmpClauseList -> OmpClause -> To -> OmpToClause !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'i' !PARSE-TREE: | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/order-clause01.f90 b/flang/test/Parser/OpenMP/order-clause01.f90 index 087e400934de5..5fc1b580b64f2 100644 --- a/flang/test/Parser/OpenMP/order-clause01.f90 +++ b/flang/test/Parser/OpenMP/order-clause01.f90 @@ -18,7 +18,7 @@ subroutine test_do_order() !PARSE-TREE-NEXT: OmpDirectiveName -> llvm::omp::Directive = do !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause 
!PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_simd_order_reproducible() integer :: i, j = 1 @@ -36,7 +36,7 @@ subroutine test_simd_order_reproducible() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Reproducible !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_do_simd_order_unconstrained() integer :: i, j = 1 @@ -54,7 +54,7 @@ subroutine test_do_simd_order_unconstrained() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Unconstrained !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_parallel_do_order() integer :: i, j = 1 @@ -71,7 +71,7 @@ subroutine test_parallel_do_order() !PARSE-TREE-NEXT: OmpDirectiveName -> llvm::omp::Directive = parallel do !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_parallel_do_simd_order_reproducible() integer :: i, j = 1 @@ -89,7 +89,7 @@ subroutine test_parallel_do_simd_order_reproducible() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Reproducible !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_simd_order_unconstrained() integer :: i, j = 1 @@ -107,7 +107,7 @@ subroutine test_target_simd_order_unconstrained() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Unconstrained !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_parallel_do_order() integer :: i, j = 1 @@ 
-124,7 +124,7 @@ subroutine test_target_parallel_do_order() !PARSE-TREE-NEXT: OmpDirectiveName -> llvm::omp::Directive = target parallel do !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_parallel_do_simd_order_reproducible() integer :: i, j = 1 @@ -142,7 +142,7 @@ subroutine test_target_parallel_do_simd_order_reproducible() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Reproducible !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_teams_distribute_simd_order_unconstrained() integer :: i, j = 1 @@ -160,7 +160,7 @@ subroutine test_teams_distribute_simd_order_unconstrained() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Unconstrained !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_teams_distribute_parallel_do_order() integer :: i, j = 1 @@ -177,7 +177,7 @@ subroutine test_teams_distribute_parallel_do_order() !PARSE-TREE-NEXT: OmpDirectiveName -> llvm::omp::Directive = teams distribute parallel do !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_teams_distribute_parallel_do_simd_order_reproducible() integer :: i, j = 1 @@ -195,7 +195,7 @@ subroutine test_teams_distribute_parallel_do_simd_order_reproducible() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Reproducible !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_teams_distribute_simd_order_unconstrained() 
integer :: i, j = 1 @@ -213,7 +213,7 @@ subroutine test_target_teams_distribute_simd_order_unconstrained() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Unconstrained !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_teams_distribute_parallel_do_order() integer :: i, j = 1 @@ -230,7 +230,7 @@ subroutine test_target_teams_distribute_parallel_do_order() !PARSE-TREE-NEXT: OmpDirectiveName -> llvm::omp::Directive = target teams distribute parallel do !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_target_teams_distribute_parallel_do_simd_order_reproducible() integer :: i, j = 1 @@ -248,7 +248,7 @@ subroutine test_target_teams_distribute_parallel_do_simd_order_reproducible() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Reproducible !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} subroutine test_taskloop_simd_order_unconstrained() integer :: i, j = 1 @@ -266,4 +266,4 @@ subroutine test_taskloop_simd_order_unconstrained() !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Order -> OmpOrderClause !PARSE-TREE-NEXT: OmpOrderModifier -> Value = Unconstrained !PARSE-TREE-NEXT: Ordering = Concurrent -!PARSE-TREE-NEXT: Flags = None +!PARSE-TREE-NEXT: Flags = {} diff --git a/flang/test/Parser/OpenMP/ordered-block-vs-standalone.f90 b/flang/test/Parser/OpenMP/ordered-block-vs-standalone.f90 index 58f1eae07ca6f..abc4258472646 100644 --- a/flang/test/Parser/OpenMP/ordered-block-vs-standalone.f90 +++ b/flang/test/Parser/OpenMP/ordered-block-vs-standalone.f90 @@ -11,7 +11,7 @@ subroutine standalone ! CHECK: OpenMPConstruct -> OpenMPStandaloneConstruct ! 
CHECK-NEXT: | OmpDirectiveName -> llvm::omp::Directive = ordered ! CHECK-NEXT: | OmpClauseList -> - ! CHECK-NEXT: | Flags = None + ! CHECK-NEXT: | Flags = {} !$omp ordered depend(source) x(i, j) = i + j end do @@ -29,7 +29,7 @@ subroutine strict_block ! CHECK-NEXT: | OmpBeginDirective ! CHECK-NEXT: | | OmpDirectiveName -> llvm::omp::Directive = ordered ! CHECK-NEXT: | | OmpClauseList -> - ! CHECK-NEXT: | | Flags = None + ! CHECK-NEXT: | | Flags = {} !$omp ordered block tmp = i + j @@ -50,7 +50,7 @@ subroutine loose_block ! CHECK-NEXT: | OmpBeginDirective ! CHECK-NEXT: | | OmpDirectiveName -> llvm::omp::Directive = ordered ! CHECK-NEXT: | | OmpClauseList -> - ! CHECK-NEXT: | | Flags = None + ! CHECK-NEXT: | | Flags = {} !$omp ordered tmp = i + j x(i, j) = tmp diff --git a/flang/test/Parser/OpenMP/replayable-clause.f90 b/flang/test/Parser/OpenMP/replayable-clause.f90 index c1733449fcb70..24ccc01780898 100644 --- a/flang/test/Parser/OpenMP/replayable-clause.f90 +++ b/flang/test/Parser/OpenMP/replayable-clause.f90 @@ -17,7 +17,7 @@ subroutine f00 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = task !PARSE-TREE: | | OmpClauseList -> OmpClause -> Replayable -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block @@ -41,7 +41,7 @@ subroutine f01(x) !PARSE-TREE: | OmpClause -> Replayable -> OmpReplayableClause -> Scalar -> Logical -> Constant -> Expr = '.true._4' !PARSE-TREE: | | LiteralConstant -> LogicalLiteralConstant !PARSE-TREE: | | | bool = 'true' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} subroutine f02 @@ -57,4 +57,4 @@ subroutine f02 !PARSE-TREE: | OmpClauseList -> OmpClause -> Replayable -> OmpReplayableClause -> Scalar -> Logical -> Constant -> Expr = '.false._4' !PARSE-TREE: | | LiteralConstant -> LogicalLiteralConstant !PARSE-TREE: | | | bool = 'false' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/requires.f90 
b/flang/test/Parser/OpenMP/requires.f90 index ab4f4371480f7..49d78737f415f 100644 --- a/flang/test/Parser/OpenMP/requires.f90 +++ b/flang/test/Parser/OpenMP/requires.f90 @@ -8,7 +8,7 @@ !PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires !PARSE-TREE: | OmpClauseList -> OmpClause -> AtomicDefaultMemOrder -> OmpAtomicDefaultMemOrderClause -> OmpMemoryOrderType = Seq_Cst -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp requires unified_shared_memory unified_address @@ -18,7 +18,7 @@ !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires !PARSE-TREE: | OmpClauseList -> OmpClause -> UnifiedSharedMemory !PARSE-TREE: | OmpClause -> UnifiedAddress -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp requires dynamic_allocators reverse_offload @@ -28,7 +28,7 @@ !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires !PARSE-TREE: | OmpClauseList -> OmpClause -> DynamicAllocators !PARSE-TREE: | OmpClause -> ReverseOffload -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp requires self_maps(.true.) unified_address(.false.) 
@@ -42,7 +42,7 @@ !PARSE-TREE: | OmpClause -> UnifiedAddress -> OmpUnifiedAddressClause -> Scalar -> Logical -> Constant -> Expr = '.false._4' !PARSE-TREE: | | LiteralConstant -> LogicalLiteralConstant !PARSE-TREE: | | | bool = 'false' -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} !$omp requires device_safesync @@ -51,6 +51,6 @@ !PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires !PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceSafesync -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} end diff --git a/flang/test/Parser/OpenMP/sections.f90 b/flang/test/Parser/OpenMP/sections.f90 index 76e6b90f05721..54b3e6641c147 100644 --- a/flang/test/Parser/OpenMP/sections.f90 +++ b/flang/test/Parser/OpenMP/sections.f90 @@ -17,13 +17,13 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpBeginSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct !PARSE-TREE: | | Block !PARSE-TREE: | OmpEndSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !============================================================================== ! 
single section, without `!$omp section` @@ -39,7 +39,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpBeginSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()' @@ -48,7 +48,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpEndSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !============================================================================== ! single section with `!$omp section` @@ -66,12 +66,12 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpBeginSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()' !PARSE-TREE: | | | | Call @@ -79,7 +79,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpEndSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !============================================================================== ! 
multiple sections @@ -105,12 +105,12 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpBeginSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()' !PARSE-TREE: | | | | Call @@ -119,7 +119,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f2()' !PARSE-TREE: | | | | Call @@ -128,7 +128,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f3()' !PARSE-TREE: | | | | Call @@ -136,7 +136,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpEndSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !============================================================================== ! 
multiple sections with clauses @@ -163,12 +163,12 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> OmpClause -> Private -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | OmpClause -> Firstprivate -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()' !PARSE-TREE: | | | | Call @@ -177,7 +177,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f2()' !PARSE-TREE: | | | | Call @@ -186,7 +186,7 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | | OmpDirectiveSpecification !PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section !PARSE-TREE: | | | OmpClauseList -> -!PARSE-TREE: | | | Flags = None +!PARSE-TREE: | | | Flags = {} !PARSE-TREE: | | Block !PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f3()' !PARSE-TREE: | | | | Call @@ -194,6 +194,6 @@ subroutine openmp_sections(x, y) !PARSE-TREE: | OmpEndSectionsDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = sections !PARSE-TREE: | | OmpClauseList -> OmpClause -> Nowait -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} END 
subroutine openmp_sections diff --git a/flang/test/Parser/OpenMP/taskgraph.f90 b/flang/test/Parser/OpenMP/taskgraph.f90 index fa9994f41345e..a5966802aede8 100644 --- a/flang/test/Parser/OpenMP/taskgraph.f90 +++ b/flang/test/Parser/OpenMP/taskgraph.f90 @@ -17,7 +17,7 @@ subroutine f00 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = taskgraph !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> BlockConstruct !PARSE-TREE: | | | BlockStmt -> @@ -54,23 +54,23 @@ subroutine f01(x, y) !PARSE-TREE: | | | Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | OmpClause -> GraphReset -> OmpGraphResetClause -> Scalar -> Logical -> Expr = 'y' !PARSE-TREE: | | | Designator -> DataRef -> Name = 'y' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OmpBlockConstruct !PARSE-TREE: | | | OmpBeginDirective !PARSE-TREE: | | | | OmpDirectiveName -> llvm::omp::Directive = task !PARSE-TREE: | | | | OmpClauseList -> -!PARSE-TREE: | | | | Flags = None +!PARSE-TREE: | | | | Flags = {} !PARSE-TREE: | | | Block !PARSE-TREE: | | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> ContinueStmt !PARSE-TREE: | | | OmpEndDirective !PARSE-TREE: | | | | OmpDirectiveName -> llvm::omp::Directive = task !PARSE-TREE: | | | | OmpClauseList -> -!PARSE-TREE: | | | | Flags = None +!PARSE-TREE: | | | | Flags = {} !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = taskgraph !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} subroutine f02 @@ -87,9 +87,9 @@ subroutine f02 !PARSE-TREE: | OmpBeginDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = taskgraph !PARSE-TREE: | | OmpClauseList -> OmpClause -> GraphReset 
-> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = taskgraph !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} diff --git a/flang/test/Parser/OpenMP/threadprivate.f90 b/flang/test/Parser/OpenMP/threadprivate.f90 index 69b281f848375..b7dfd952bb4a7 100644 --- a/flang/test/Parser/OpenMP/threadprivate.f90 +++ b/flang/test/Parser/OpenMP/threadprivate.f90 @@ -22,4 +22,4 @@ module m !PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Name = 'blk' !PARSE-TREE: | OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'b' !PARSE-TREE: | OmpClauseList -> -!PARSE-TREE: | Flags = None +!PARSE-TREE: | Flags = {} diff --git a/flang/test/Parser/OpenMP/tile.f90 b/flang/test/Parser/OpenMP/tile.f90 index 82004fd37a0f2..483261f9d6d98 100644 --- a/flang/test/Parser/OpenMP/tile.f90 +++ b/flang/test/Parser/OpenMP/tile.f90 @@ -19,7 +19,7 @@ subroutine openmp_tiles(x) !PARSE-TREE: OmpBeginLoopDirective !PARSE-TREE: OmpClauseList -> OmpClause -> Sizes -> Scalar -> Integer -> Expr = '2_4' !PARSE-TREE: LiteralConstant -> IntLiteralConstant = '2' -!PARSE-TREE: Flags = None +!PARSE-TREE: Flags = {} !PARSE-TREE: DoConstruct !PARSE-TREE: EndDoStmt !PARSE-TREE: OmpEndLoopDirective diff --git a/flang/test/Parser/OpenMP/transparent-clause.f90 b/flang/test/Parser/OpenMP/transparent-clause.f90 index 3512326b321e6..f9471b55e6c83 100644 --- a/flang/test/Parser/OpenMP/transparent-clause.f90 +++ b/flang/test/Parser/OpenMP/transparent-clause.f90 @@ -25,7 +25,7 @@ subroutine f00(x) !PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' !PARSE-TREE: | | | bool = 'true' !PARSE-TREE: | | OmpClause -> Transparent -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block @@ -44,12 +44,12 @@ subroutine f01 !PARSE-TREE: | | OmpDirectiveName -> 
llvm::omp::Directive = task !PARSE-TREE: | | OmpClauseList -> OmpClause -> Transparent -> OmpTransparentClause -> Scalar -> Integer -> Expr = '0_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '0' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | OmpEndDirective !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = task !PARSE-TREE: | | OmpClauseList -> -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} subroutine f02 @@ -73,6 +73,6 @@ subroutine f02 !PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = taskloop !PARSE-TREE: | | OmpClauseList -> OmpClause -> Transparent -> OmpTransparentClause -> Scalar -> Integer -> Expr = '2_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '2' -!PARSE-TREE: | | Flags = None +!PARSE-TREE: | | Flags = {} !PARSE-TREE: | Block !PARSE-TREE: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct diff --git a/flang/test/Parser/OpenMP/unroll-heuristic.f90 b/flang/test/Parser/OpenMP/unroll-heuristic.f90 index c181a06b457f3..6ce7b7e12c8a6 100644 --- a/flang/test/Parser/OpenMP/unroll-heuristic.f90 +++ b/flang/test/Parser/OpenMP/unroll-heuristic.f90 @@ -22,7 +22,7 @@ END subroutine openmp_parse_unroll_heuristic !PTREE-NEXT: | OmpBeginLoopDirective !PTREE-NEXT: | | OmpDirectiveName -> llvm::omp::Directive = unroll !PTREE-NEXT: | | OmpClauseList -> -!PTREE-NEXT: | | Flags = None +!PTREE-NEXT: | | Flags = {} !PTREE-NEXT: | Block !PTREE-NEXT: | | ExecutionPartConstruct -> ExecutableConstruct -> DoConstruct !PTREE-NEXT: | | | NonLabelDoStmt @@ -43,4 +43,4 @@ END subroutine openmp_parse_unroll_heuristic !PTREE-NEXT: | OmpEndLoopDirective !PTREE-NEXT: | | OmpDirectiveName -> llvm::omp::Directive = unroll !PTREE-NEXT: | | OmpClauseList -> -!PTREE-NEXT: | | Flags = None +!PTREE-NEXT: | | Flags = {} diff --git a/libc/cmake/modules/LLVMLibCArchitectures.cmake b/libc/cmake/modules/LLVMLibCArchitectures.cmake index 6c730f807de6d..939fc1226a4e9 100644 
--- a/libc/cmake/modules/LLVMLibCArchitectures.cmake +++ b/libc/cmake/modules/LLVMLibCArchitectures.cmake @@ -215,6 +215,37 @@ else() "Unsupported libc target operating system ${LIBC_TARGET_OS}") endif() +# If the compiler target triple is not the same as the triple specified by +# LIBC_TARGET_TRIPLE or LLVM_RUNTIMES_TARGET, we will add a --target option +# if the compiler is clang. If the compiler is GCC we just error out as there +# is no equivalent of an option like --target. +if(explicit_target_triple AND + (NOT (libc_compiler_triple STREQUAL explicit_target_triple))) + set(LIBC_CROSSBUILD TRUE) + if(CMAKE_COMPILER_IS_GNUCXX) + message(FATAL_ERROR + "GCC target triple (${libc_compiler_triple}) and the explicitly " + "specified target triple (${explicit_target_triple}) do not match.") + else() + list(APPEND + LIBC_COMPILE_OPTIONS_DEFAULT "--target=${explicit_target_triple}") + endif() +endif() + +if(LIBC_TARGET_OS_IS_DARWIN) + execute_process( + COMMAND xcrun --sdk macosx --show-sdk-path + OUTPUT_VARIABLE MACOSX_SDK_PATH + RESULT_VARIABLE MACOSX_SDK_PATH_RESULT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(MACOSX_SDK_PATH_RESULT EQUAL 0) + list(APPEND LIBC_COMPILE_OPTIONS_DEFAULT "-I" "${MACOSX_SDK_PATH}/usr/include") + else() + message(WARNING "Could not find macOS SDK path. `xcrun --sdk macosx --show-sdk-path` failed.") + endif() +endif() + # Windows does not support full mode build.
if (LIBC_TARGET_OS_IS_WINDOWS AND LLVM_LIBC_FULL_BUILD) message(FATAL_ERROR "Windows does not support full mode build.") diff --git a/libc/config/darwin/aarch64/entrypoints.txt b/libc/config/darwin/aarch64/entrypoints.txt index e3c6c2b30c415..3909417f9730d 100644 --- a/libc/config/darwin/aarch64/entrypoints.txt +++ b/libc/config/darwin/aarch64/entrypoints.txt @@ -111,6 +111,7 @@ if(LLVM_LIBC_FULL_BUILD) libc.src.setjmp.setjmp libc.src.setjmp.siglongjmp libc.src.setjmp.sigsetjmp + libc.src.stdlib._Exit ) endif() diff --git a/libc/include/llvm-libc-macros/darwin/CMakeLists.txt b/libc/include/llvm-libc-macros/darwin/CMakeLists.txt new file mode 100644 index 0000000000000..ea08c63c00301 --- /dev/null +++ b/libc/include/llvm-libc-macros/darwin/CMakeLists.txt @@ -0,0 +1,5 @@ +add_header( + time_macros + HDR + time-macros.h +) diff --git a/libc/include/llvm-libc-macros/darwin/time-macros.h b/libc/include/llvm-libc-macros/darwin/time-macros.h new file mode 100644 index 0000000000000..477dfa8eda85f --- /dev/null +++ b/libc/include/llvm-libc-macros/darwin/time-macros.h @@ -0,0 +1,14 @@ +//===-- Definition of macros from time.h ---------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_MACROS_DARWIN_TIME_MACROS_H +#define LLVM_LIBC_MACROS_DARWIN_TIME_MACROS_H + +#include <_time.h> + +#endif // LLVM_LIBC_MACROS_DARWIN_TIME_MACROS_H diff --git a/libc/include/llvm-libc-macros/time-macros.h b/libc/include/llvm-libc-macros/time-macros.h index 30e0a310a5485..c026df29b1e7f 100644 --- a/libc/include/llvm-libc-macros/time-macros.h +++ b/libc/include/llvm-libc-macros/time-macros.h @@ -7,6 +7,8 @@ #include "linux/time-macros.h" #elif defined(__ELF__) #include "baremetal/time-macros.h" +#elif defined(__APPLE__) +#include "darwin/time-macros.h" #else #define CLOCKS_PER_SEC 1000000 #endif diff --git a/libc/include/llvm-libc-types/clockid_t.h b/libc/include/llvm-libc-types/clockid_t.h index 4b059599502c4..926948717c664 100644 --- a/libc/include/llvm-libc-types/clockid_t.h +++ b/libc/include/llvm-libc-types/clockid_t.h @@ -9,6 +9,12 @@ #ifndef LLVM_LIBC_TYPES_CLOCKID_T_H #define LLVM_LIBC_TYPES_CLOCKID_T_H +#if defined(__APPLE__) +// Darwin provides its own definition for clockid_t. Use that to prevent +// redeclaration errors and ensure correctness. +#include <_time.h> +#else typedef int clockid_t; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_CLOCKID_T_H diff --git a/libc/include/llvm-libc-types/struct_timespec.h b/libc/include/llvm-libc-types/struct_timespec.h index 28b5a571f6790..8993ecc7db8f0 100644 --- a/libc/include/llvm-libc-types/struct_timespec.h +++ b/libc/include/llvm-libc-types/struct_timespec.h @@ -9,6 +9,11 @@ #ifndef LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H #define LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H +#if defined(__APPLE__) +// Darwin provides its own definition for struct timespec. Include it directly +// to ensure type compatibility and avoid redefinition errors. +#include +#else #include "time_t.h" struct timespec { @@ -16,5 +21,6 @@ /* TODO: BIG_ENDIAN may require padding.
*/ long tv_nsec; /* Nanoseconds. */ }; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H diff --git a/libc/include/llvm-libc-types/struct_timeval.h b/libc/include/llvm-libc-types/struct_timeval.h index 9595d85a46c8f..41f0b4e92932e 100644 --- a/libc/include/llvm-libc-types/struct_timeval.h +++ b/libc/include/llvm-libc-types/struct_timeval.h @@ -12,9 +12,15 @@ #include "suseconds_t.h" #include "time_t.h" +#if defined(__APPLE__) +// Darwin provides its own definition for struct timeval. Include it directly +// to ensure type compatibility and avoid redefinition errors. +#include +#else struct timeval { time_t tv_sec; // Seconds suseconds_t tv_usec; // Micro seconds }; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H diff --git a/libc/include/llvm-libc-types/suseconds_t.h b/libc/include/llvm-libc-types/suseconds_t.h index 8e926e8401f5c..acc1822cb59e1 100644 --- a/libc/include/llvm-libc-types/suseconds_t.h +++ b/libc/include/llvm-libc-types/suseconds_t.h @@ -14,6 +14,12 @@ // types...] and suseconds_t are no greater than the width of type long. // The kernel expects 64 bit suseconds_t at least on x86_64. +#if defined(__APPLE__) +// Darwin provides its own definition for suseconds_t. Include it directly +// to ensure type compatibility and avoid redefinition errors. +#include +#else typedef long suseconds_t; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_SUSECONDS_T_H diff --git a/libc/include/llvm-libc-types/time_t_32.h b/libc/include/llvm-libc-types/time_t_32.h index 2c415f6fa9dca..8d7a81e5ce7f7 100644 --- a/libc/include/llvm-libc-types/time_t_32.h +++ b/libc/include/llvm-libc-types/time_t_32.h @@ -9,6 +9,12 @@ #ifndef LLVM_LIBC_TYPES_TIME_T_32_H #define LLVM_LIBC_TYPES_TIME_T_32_H +#if defined(__APPLE__) +// Darwin provides its own definition for time_t. Include it directly +// to ensure type compatibility and avoid redefinition errors. 
+#include +#else typedef __INT32_TYPE__ time_t; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_TIME_T_32_H diff --git a/libc/include/llvm-libc-types/time_t_64.h b/libc/include/llvm-libc-types/time_t_64.h index 8f7fd3233646e..c8267abe31289 100644 --- a/libc/include/llvm-libc-types/time_t_64.h +++ b/libc/include/llvm-libc-types/time_t_64.h @@ -9,6 +9,12 @@ #ifndef LLVM_LIBC_TYPES_TIME_T_64_H #define LLVM_LIBC_TYPES_TIME_T_64_H +#if defined(__APPLE__) +// Darwin provides its own definition for time_t. Include it directly +// to ensure type compatibility and avoid redefinition errors. +#include +#else typedef __INT64_TYPE__ time_t; +#endif // __APPLE__ #endif // LLVM_LIBC_TYPES_TIME_T_64_H diff --git a/libc/include/sys/syscall.h.def b/libc/include/sys/syscall.h.def index 60e5024e500e3..f7e53cc4942d5 100644 --- a/libc/include/sys/syscall.h.def +++ b/libc/include/sys/syscall.h.def @@ -9,7 +9,7 @@ #ifndef LLVM_LIBC_SYS_SYSCALL_H #define LLVM_LIBC_SYS_SYSCALL_H -//TODO: Handle non-linux syscalls +#if defined(__linux__) #include @@ -2361,5 +2361,6 @@ #define SYS_writev __NR_writev #endif +#endif // __linux__ #endif // LLVM_LIBC_SYS_SYSCALL_H diff --git a/libc/src/__support/OSUtil/darwin/CMakeLists.txt b/libc/src/__support/OSUtil/darwin/CMakeLists.txt index 4241bb37684f7..9e69bf7d0cbab 100644 --- a/libc/src/__support/OSUtil/darwin/CMakeLists.txt +++ b/libc/src/__support/OSUtil/darwin/CMakeLists.txt @@ -4,13 +4,16 @@ endif() add_subdirectory(${LIBC_TARGET_ARCHITECTURE}) -add_header_library( +add_object_library( darwin_util + SRCS + exit.cpp HDRS io.h syscall.h DEPENDS - .${LIBC_TARGET_ARCHITECTURE}.darwin_util + .${LIBC_TARGET_ARCHITECTURE}.darwin_${LIBC_TARGET_ARCHITECTURE}_util libc.src.__support.common libc.src.__support.CPP.string_view + libc.include.sys_syscall ) diff --git a/libc/src/__support/OSUtil/darwin/aarch64/CMakeLists.txt b/libc/src/__support/OSUtil/darwin/aarch64/CMakeLists.txt index 5ab95b01758c8..b36fe22017f34 100644 --- 
a/libc/src/__support/OSUtil/darwin/aarch64/CMakeLists.txt +++ b/libc/src/__support/OSUtil/darwin/aarch64/CMakeLists.txt @@ -1,5 +1,5 @@ add_header_library( - darwin_util + darwin_aarch64_util HDRS syscall.h DEPENDS diff --git a/libc/src/__support/OSUtil/darwin/exit.cpp b/libc/src/__support/OSUtil/darwin/exit.cpp new file mode 100644 index 0000000000000..a5fa4a7522189 --- /dev/null +++ b/libc/src/__support/OSUtil/darwin/exit.cpp @@ -0,0 +1,23 @@ +//===------------ MacOS implementation of an exit function ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/__support/OSUtil/darwin/syscall.h" // syscall_impl +#include "src/__support/common.h" +#include "src/__support/macros/config.h" +#include "sys/syscall.h" // For syscall numbers. 
+ +namespace LIBC_NAMESPACE_DECL { +namespace internal { + +[[noreturn]] void exit(int status) { + for (;;) + LIBC_NAMESPACE::syscall_impl(SYS_exit, status); +} + +} // namespace internal +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/__support/time/darwin/CMakeLists.txt b/libc/src/__support/time/darwin/CMakeLists.txt new file mode 100644 index 0000000000000..a06a41289a41c --- /dev/null +++ b/libc/src/__support/time/darwin/CMakeLists.txt @@ -0,0 +1,12 @@ +add_object_library( + clock_gettime + SRCS + clock_gettime.cpp + HDRS + ../clock_gettime.h + DEPENDS + libc.src.__support.common + libc.src.__support.error_or + libc.hdr.types.struct_timeval + libc.hdr.types.struct_timespec +) diff --git a/libc/src/__support/time/darwin/clock_gettime.cpp b/libc/src/__support/time/darwin/clock_gettime.cpp new file mode 100644 index 0000000000000..aa483aa9a01d2 --- /dev/null +++ b/libc/src/__support/time/darwin/clock_gettime.cpp @@ -0,0 +1,42 @@ +//===-- Darwin implementation of internal clock_gettime -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/__support/time/clock_gettime.h" +#include "hdr/errno_macros.h" // For EINVAL +#include "hdr/time_macros.h" +#include "hdr/types/struct_timespec.h" +#include "hdr/types/struct_timeval.h" +#include "src/__support/OSUtil/syscall.h" // For syscall_impl +#include "src/__support/common.h" +#include "src/__support/error_or.h" +#include // For SYS_gettimeofday +#include // For struct timezone + +namespace LIBC_NAMESPACE_DECL { +namespace internal { + +ErrorOr clock_gettime(clockid_t clockid, struct timespec *ts) { + if (clockid != CLOCK_REALTIME) + return Error(EINVAL); + struct timeval tv; + // The second argument to gettimeofday is a timezone pointer + // The third argument is mach_absolute_time + // Both of these, we don't need here, so they are 0 + long ret = LIBC_NAMESPACE::syscall_impl( + SYS_gettimeofday, reinterpret_cast(&tv), 0, 0); + if (ret != 0) + // The syscall returns -1 on error and sets errno. + return Error(EINVAL); + + ts->tv_sec = tv.tv_sec; + ts->tv_nsec = tv.tv_usec * 1000; + return 0; +} + +} // namespace internal +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/time/darwin/CMakeLists.txt b/libc/src/time/darwin/CMakeLists.txt new file mode 100644 index 0000000000000..6d68086c72584 --- /dev/null +++ b/libc/src/time/darwin/CMakeLists.txt @@ -0,0 +1,10 @@ +add_entrypoint_object( + clock_gettime + SRCS + clock_gettime.cpp + HDRS + # The public header is part of the parent directory's library. 
+ DEPENDS + libc.src.__support.time.clock_gettime + libc.src.errno.errno +) diff --git a/libc/src/time/darwin/clock_gettime.cpp b/libc/src/time/darwin/clock_gettime.cpp new file mode 100644 index 0000000000000..ecf116bbc5521 --- /dev/null +++ b/libc/src/time/darwin/clock_gettime.cpp @@ -0,0 +1,28 @@ +//===---- Darwin implementation of the POSIX clock_gettime function --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===-----------------------------------------------------------------===// + +#include "src/time/clock_gettime.h" + +#include "src/__support/common.h" +#include "src/__support/libc_errno.h" +#include "src/__support/macros/config.h" +#include "src/__support/time/clock_gettime.h" + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, clock_gettime, + (clockid_t clockid, struct timespec *ts)) { + auto result = internal::clock_gettime(clockid, ts); + if (!result.has_value()) { + libc_errno = result.error(); + return -1; + } + return 0; +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/test/UnitTest/ExecuteFunctionUnix.cpp b/libc/test/UnitTest/ExecuteFunctionUnix.cpp index 7c2eb7c6e887c..ab18f7a2ebf52 100644 --- a/libc/test/UnitTest/ExecuteFunctionUnix.cpp +++ b/libc/test/UnitTest/ExecuteFunctionUnix.cpp @@ -57,7 +57,7 @@ ProcessStatus invoke_in_subprocess(FunctionCaller *func, int timeout_ms) { } ::close(pipe_fds[1]); - struct pollfd poll_fd{pipe_fds[0], POLLIN, 0}; + pollfd poll_fd{pipe_fds[0], POLLIN, 0}; // No events requested so this call will only return after the timeout or if // the pipes peer was closed, signaling the process exited. 
if (::poll(&poll_fd, 1, timeout_ms) == -1) { diff --git a/libc/test/src/__support/time/darwin/CMakeLists.txt b/libc/test/src/__support/time/darwin/CMakeLists.txt new file mode 100644 index 0000000000000..ee1247b354173 --- /dev/null +++ b/libc/test/src/__support/time/darwin/CMakeLists.txt @@ -0,0 +1,8 @@ +add_libc_test( + clock_gettime + SUITE libc-support-time-tests + SRCS clock_gettime.cpp + DEPENDS + libc.src.__support.CPP.expected + libc.src.__support.time.darwin.clock_gettime +) diff --git a/libc/test/src/__support/time/darwin/clock_gettime.cpp b/libc/test/src/__support/time/darwin/clock_gettime.cpp new file mode 100644 index 0000000000000..d593c5d02744a --- /dev/null +++ b/libc/test/src/__support/time/darwin/clock_gettime.cpp @@ -0,0 +1,20 @@ +//===-- unit tests for darwin's time utilities --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/__support/time/clock_gettime.h" +#include "src/__support/CPP/expected.h" +#include "test/UnitTest/Test.h" + +template +using expected = LIBC_NAMESPACE::cpp::expected; + +TEST(LlvmLibcSupportDarwinClockGetTime, BasicGetTime) { + struct timespec ts; + auto result = LIBC_NAMESPACE::internal::clock_gettime(CLOCK_REALTIME, &ts); + ASSERT_TRUE(result.has_value()); +} diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt index bcd3d139aa46c..05a74be4ca21f 100644 --- a/libc/test/src/stdlib/CMakeLists.txt +++ b/libc/test/src/stdlib/CMakeLists.txt @@ -448,6 +448,19 @@ if(LLVM_LIBC_FULL_BUILD) libc-stdlib-tests SRCS _Exit_test.cpp + DEPENDS + libc.src.__support.OSUtil.osutil + libc.src.stdlib._Exit + ) + + add_libc_test( + exit_test + # The EXPECT_EXITS test is only available for unit tests.
+ UNIT_TEST_ONLY + SUITE + libc-stdlib-tests + SRCS + exit_test.cpp DEPENDS libc.src.stdlib._Exit libc.src.stdlib.exit diff --git a/libc/test/src/stdlib/_Exit_test.cpp b/libc/test/src/stdlib/_Exit_test.cpp index 333277dc01dca..57c432828c2f3 100644 --- a/libc/test/src/stdlib/_Exit_test.cpp +++ b/libc/test/src/stdlib/_Exit_test.cpp @@ -7,13 +7,9 @@ //===----------------------------------------------------------------------===// #include "src/stdlib/_Exit.h" -#include "src/stdlib/exit.h" #include "test/UnitTest/Test.h" TEST(LlvmLibcStdlib, _Exit) { EXPECT_EXITS([] { LIBC_NAMESPACE::_Exit(1); }, 1); EXPECT_EXITS([] { LIBC_NAMESPACE::_Exit(65); }, 65); - - EXPECT_EXITS([] { LIBC_NAMESPACE::exit(1); }, 1); - EXPECT_EXITS([] { LIBC_NAMESPACE::exit(65); }, 65); } diff --git a/libc/test/src/stdlib/exit_test.cpp b/libc/test/src/stdlib/exit_test.cpp new file mode 100644 index 0000000000000..5c82d8303036a --- /dev/null +++ b/libc/test/src/stdlib/exit_test.cpp @@ -0,0 +1,15 @@ +//===-- Unittests for exit -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/stdlib/exit.h" +#include "test/UnitTest/Test.h" + +TEST(LlvmLibcStdlib, exit) { + EXPECT_EXITS([] { LIBC_NAMESPACE::exit(1); }, 1); + EXPECT_EXITS([] { LIBC_NAMESPACE::exit(65); }, 65); +} diff --git a/libclc/clc/lib/generic/atomic/clc_atomic_dec.cl b/libclc/clc/lib/generic/atomic/clc_atomic_dec.cl index 7984dba5731ee..e0d24198d96d9 100644 --- a/libclc/clc/lib/generic/atomic/clc_atomic_dec.cl +++ b/libclc/clc/lib/generic/atomic/clc_atomic_dec.cl @@ -9,7 +9,7 @@ #include #define __CLC_FUNCTION __clc_atomic_dec -#define __CLC_IMPL_FUNCTION __scoped_atomic_fetch_add +#define __CLC_IMPL_FUNCTION __scoped_atomic_udec_wrap #define __CLC_INC_DEC #define __CLC_BODY diff --git a/libclc/clc/lib/generic/atomic/clc_atomic_def.inc b/libclc/clc/lib/generic/atomic/clc_atomic_def.inc index 75561430b33ad..6926b82248bf9 100644 --- a/libclc/clc/lib/generic/atomic/clc_atomic_def.inc +++ b/libclc/clc/lib/generic/atomic/clc_atomic_def.inc @@ -46,7 +46,7 @@ _CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_FUNCTION( \ volatile ADDRSPACE __CLC_GENTYPE *Ptr, int MemoryOrder, \ int MemoryScope) { \ - return __CLC_IMPL_FUNCTION(Ptr, (__CLC_GENTYPE)1, MemoryOrder, \ + return __CLC_IMPL_FUNCTION(Ptr, (__CLC_U_GENTYPE)(-1), MemoryOrder, \ MemoryScope); \ } #elif defined(__CLC_RETURN_VOID) diff --git a/libclc/clc/lib/generic/atomic/clc_atomic_inc.cl b/libclc/clc/lib/generic/atomic/clc_atomic_inc.cl index 7171f84c30ce0..d594754677800 100644 --- a/libclc/clc/lib/generic/atomic/clc_atomic_inc.cl +++ b/libclc/clc/lib/generic/atomic/clc_atomic_inc.cl @@ -9,7 +9,7 @@ #include #define __CLC_FUNCTION __clc_atomic_inc -#define __CLC_IMPL_FUNCTION __scoped_atomic_fetch_sub +#define __CLC_IMPL_FUNCTION __scoped_atomic_uinc_wrap #define __CLC_INC_DEC #define __CLC_BODY diff --git a/libcxx/CMakeLists.txt b/libcxx/CMakeLists.txt index 
1423b6713fd35..8b4cd2636fd4d 100644 --- a/libcxx/CMakeLists.txt +++ b/libcxx/CMakeLists.txt @@ -763,6 +763,18 @@ config_define(${LIBCXX_ENABLE_WIDE_CHARACTERS} _LIBCPP_HAS_WIDE_CHARACTERS) config_define(${LIBCXX_ENABLE_TIME_ZONE_DATABASE} _LIBCPP_HAS_TIME_ZONE_DATABASE) config_define(${LIBCXX_ENABLE_VENDOR_AVAILABILITY_ANNOTATIONS} _LIBCPP_HAS_VENDOR_AVAILABILITY_ANNOTATIONS) +# Set C library in use +if (RUNTIMES_USE_LIBC STREQUAL "picolibc") + config_define(1 _LIBCPP_LIBC_PICOLIBC) + # picolibc is derived from newlib and behaves the same in regards to libc++ + # so setting both here: + # * _LIBCPP_LIBC_NEWLIB is used now + # * _LIBCPP_LIBC_PICOLIBC can be used for further customizations later + config_define(1 _LIBCPP_LIBC_NEWLIB) +elseif (RUNTIMES_USE_LIBC STREQUAL "newlib") + config_define(1 _LIBCPP_LIBC_NEWLIB) +endif() + # TODO: Remove in LLVM 21. We're leaving an error to make this fail explicitly. if (LIBCXX_ENABLE_ASSERTIONS) message(FATAL_ERROR "LIBCXX_ENABLE_ASSERTIONS has been removed. 
Please use LIBCXX_HARDENING_MODE instead.") diff --git a/libcxx/include/__algorithm/copy_n.h b/libcxx/include/__algorithm/copy_n.h index f93f39203a7e3..56fb44811fc4a 100644 --- a/libcxx/include/__algorithm/copy_n.h +++ b/libcxx/include/__algorithm/copy_n.h @@ -10,31 +10,63 @@ #define _LIBCPP___ALGORITHM_COPY_N_H #include <__algorithm/copy.h> +#include <__algorithm/iterator_operations.h> #include <__config> #include <__iterator/iterator_traits.h> #include <__type_traits/enable_if.h> #include <__utility/convert_to_integral.h> +#include <__utility/move.h> +#include <__utility/pair.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + _LIBCPP_BEGIN_NAMESPACE_STD +template ::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter> +__copy_n(_InIter __first, typename _IterOps<_AlgPolicy>::template __difference_type<_InIter> __n, _OutIter __result) { + return std::__copy(__first, __first + __n, std::move(__result)); +} + +template ::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter> +__copy_n(_InIter __first, typename _IterOps<_AlgPolicy>::template __difference_type<_InIter> __n, _OutIter __result) { + while (__n != 0) { + *__result = *__first; + ++__first; + ++__result; + --__n; + } + return std::make_pair(std::move(__first), std::move(__result)); +} + +// The InputIterator case is handled specially here because it's been written in a way to avoid incrementing __first +// if not absolutely required. This was done to allow its use with istream_iterator and we want to avoid breaking +// people, at least currently. 
+// See https://github.com/llvm/llvm-project/commit/99847d2bf132854fffa019bab19818768102ccad template ::value && - !__has_random_access_iterator_category<_InputIterator>::value, - int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator -copy_n(_InputIterator __first, _Size __orig_n, _OutputIterator __result) { - typedef decltype(std::__convert_to_integral(__orig_n)) _IntegralSize; - _IntegralSize __n = __orig_n; - if (__n > 0) { + __enable_if_t<__has_exactly_input_iterator_category<_InputIterator>::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator +copy_n(_InputIterator __first, _Size __n, _OutputIterator __result) { + using _IntegralSize = decltype(std::__convert_to_integral(__n)); + _IntegralSize __converted = __n; + if (__converted > 0) { *__result = *__first; ++__result; - for (--__n; __n > 0; --__n) { + for (--__converted; __converted > 0; --__converted) { ++__first; *__result = *__first; ++__result; @@ -46,15 +78,17 @@ copy_n(_InputIterator __first, _Size __orig_n, _OutputIterator __result) { template ::value, int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator -copy_n(_InputIterator __first, _Size __orig_n, _OutputIterator __result) { - typedef typename iterator_traits<_InputIterator>::difference_type difference_type; - typedef decltype(std::__convert_to_integral(__orig_n)) _IntegralSize; - _IntegralSize __n = __orig_n; - return std::copy(__first, __first + difference_type(__n), __result); + __enable_if_t::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator +copy_n(_InputIterator __first, _Size __n, _OutputIterator __result) { + using _IntegralSize = decltype(std::__convert_to_integral(__n)); + _IntegralSize __converted = __n; + return std::__copy_n<_ClassicAlgPolicy>(__first, __iterator_difference_type<_InputIterator>(__converted), __result) + .second; } _LIBCPP_END_NAMESPACE_STD +_LIBCPP_POP_MACROS + #endif // 
_LIBCPP___ALGORITHM_COPY_N_H diff --git a/libcxx/include/__algorithm/find.h b/libcxx/include/__algorithm/find.h index 10379d7074c3a..d03421b846cce 100644 --- a/libcxx/include/__algorithm/find.h +++ b/libcxx/include/__algorithm/find.h @@ -230,7 +230,8 @@ struct __find_segment { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _InputIterator operator()(_InputIterator __first, _InputIterator __last, _Proj& __proj) const { - return std::__find(__first, __last, __value_, __proj); + return std::__rewrap_iter( + __first, std::__find(std::__unwrap_iter(__first), std::__unwrap_iter(__last), __value_, __proj)); } }; diff --git a/libcxx/include/__algorithm/iterator_operations.h b/libcxx/include/__algorithm/iterator_operations.h index e5c89c1e67e3a..1aa2f8d1604f1 100644 --- a/libcxx/include/__algorithm/iterator_operations.h +++ b/libcxx/include/__algorithm/iterator_operations.h @@ -219,6 +219,9 @@ struct _IterOps<_ClassicAlgPolicy> { template using __policy_iter_diff_t _LIBCPP_NODEBUG = typename _IterOps<_AlgPolicy>::template __difference_type<_Iter>; +template +using __policy_value_type _LIBCPP_NODEBUG = typename _IterOps<_AlgPolicy>::template __value_type<_Iter>; + _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS diff --git a/libcxx/include/__algorithm/ranges_copy_n.h b/libcxx/include/__algorithm/ranges_copy_n.h index 1fbc61674e2dd..6bee4c3e7c9e5 100644 --- a/libcxx/include/__algorithm/ranges_copy_n.h +++ b/libcxx/include/__algorithm/ranges_copy_n.h @@ -9,16 +9,12 @@ #ifndef _LIBCPP___ALGORITHM_RANGES_COPY_N_H #define _LIBCPP___ALGORITHM_RANGES_COPY_N_H -#include <__algorithm/copy.h> +#include <__algorithm/copy_n.h> #include <__algorithm/in_out_result.h> #include <__algorithm/iterator_operations.h> -#include <__algorithm/ranges_copy.h> #include <__config> -#include <__functional/identity.h> #include <__iterator/concepts.h> #include <__iterator/incrementable_traits.h> -#include <__iterator/unreachable_sentinel.h> -#include <__iterator/wrap_iter.h> #include <__utility/move.h> #if 
!defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -37,32 +33,13 @@ namespace ranges { template using copy_n_result = in_out_result<_Ip, _Op>; -// TODO: Merge this with copy_n struct __copy_n { - template - _LIBCPP_HIDE_FROM_ABI constexpr static copy_n_result<_InIter, _OutIter> - __go(_InIter __first, _DiffType __n, _OutIter __result) { - while (__n != 0) { - *__result = *__first; - ++__first; - ++__result; - --__n; - } - return {std::move(__first), std::move(__result)}; - } - - template - _LIBCPP_HIDE_FROM_ABI constexpr static copy_n_result<_InIter, _OutIter> - __go(_InIter __first, _DiffType __n, _OutIter __result) { - auto __ret = std::__copy(__first, __first + __n, __result); - return {__ret.first, __ret.second}; - } - template requires indirectly_copyable<_Ip, _Op> _LIBCPP_HIDE_FROM_ABI constexpr copy_n_result<_Ip, _Op> operator()(_Ip __first, iter_difference_t<_Ip> __n, _Op __result) const { - return __go(std::move(__first), __n, std::move(__result)); + auto __res = std::__copy_n<_RangeAlgPolicy>(std::move(__first), __n, std::move(__result)); + return {std::move(__res.first), std::move(__res.second)}; } }; diff --git a/libcxx/include/__compare/is_eq.h b/libcxx/include/__compare/is_eq.h index 9a82df1ebe88b..ee4d11bc7c792 100644 --- a/libcxx/include/__compare/is_eq.h +++ b/libcxx/include/__compare/is_eq.h @@ -20,12 +20,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 20 -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_eq(partial_ordering __c) noexcept { return __c == 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_neq(partial_ordering __c) noexcept { return __c != 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lt(partial_ordering __c) noexcept { return __c < 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lteq(partial_ordering __c) noexcept { return __c <= 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gt(partial_ordering __c) noexcept { return __c > 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gteq(partial_ordering __c) 
noexcept { return __c >= 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_eq(partial_ordering __c) noexcept { return __c == 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_neq(partial_ordering __c) noexcept { return __c != 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lt(partial_ordering __c) noexcept { return __c < 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lteq(partial_ordering __c) noexcept { return __c <= 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gt(partial_ordering __c) noexcept { return __c > 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gteq(partial_ordering __c) noexcept { return __c >= 0; } #endif // _LIBCPP_STD_VER >= 20 diff --git a/libcxx/include/__condition_variable/condition_variable.h b/libcxx/include/__condition_variable/condition_variable.h index 1e8edd5dcb009..b7151930e9226 100644 --- a/libcxx/include/__condition_variable/condition_variable.h +++ b/libcxx/include/__condition_variable/condition_variable.h @@ -170,7 +170,7 @@ class _LIBCPP_EXPORTED_FROM_ABI condition_variable { wait_for(unique_lock& __lk, const chrono::duration<_Rep, _Period>& __d, _Predicate __pred); typedef __libcpp_condvar_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__cv_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__cv_; } private: void diff --git a/libcxx/include/__config_site.in b/libcxx/include/__config_site.in index 6dcca1849a96c..b09ca807ee812 100644 --- a/libcxx/include/__config_site.in +++ b/libcxx/include/__config_site.in @@ -42,6 +42,10 @@ #cmakedefine _LIBCPP_HARDENING_MODE_DEFAULT @_LIBCPP_HARDENING_MODE_DEFAULT@ #cmakedefine _LIBCPP_ASSERTION_SEMANTIC_DEFAULT @_LIBCPP_ASSERTION_SEMANTIC_DEFAULT@ +// C libraries +#cmakedefine01 _LIBCPP_LIBC_PICOLIBC +#cmakedefine01 _LIBCPP_LIBC_NEWLIB + // __USE_MINGW_ANSI_STDIO gets redefined on MinGW #ifdef 
__clang__ # pragma clang diagnostic push diff --git a/libcxx/include/__configuration/platform.h b/libcxx/include/__configuration/platform.h index 88bba5473c608..644fe1724e42e 100644 --- a/libcxx/include/__configuration/platform.h +++ b/libcxx/include/__configuration/platform.h @@ -42,13 +42,6 @@ # define _LIBCPP_GLIBC_PREREQ(a, b) 0 #endif -// This is required in order for _NEWLIB_VERSION to be defined in places where we use it. -// TODO: We shouldn't be including arbitrarily-named headers from libc++ since this can break valid -// user code. Move code paths that need _NEWLIB_VERSION to another customization mechanism. -#if __has_include() -# include -#endif - #ifndef __BYTE_ORDER__ # error \ "Your compiler doesn't seem to define __BYTE_ORDER__, which is required by libc++ to know the endianness of your target platform" diff --git a/libcxx/include/__coroutine/coroutine_handle.h b/libcxx/include/__coroutine/coroutine_handle.h index b7add258510eb..b26a650748832 100644 --- a/libcxx/include/__coroutine/coroutine_handle.h +++ b/libcxx/include/__coroutine/coroutine_handle.h @@ -44,9 +44,9 @@ struct coroutine_handle { } // [coroutine.handle.export.import], export/import - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } - _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { coroutine_handle __tmp; __tmp.__handle_ = __addr; return __tmp; @@ -55,7 +55,7 @@ struct coroutine_handle { // [coroutine.handle.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return __handle_ != nullptr; } - _LIBCPP_HIDE_FROM_ABI bool done() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool done() const { _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(__is_suspended(), "done() 
can be called only on suspended coroutines"); return __builtin_coro_done(__handle_); } @@ -100,7 +100,7 @@ struct coroutine_handle { _LIBCPP_HIDE_FROM_ABI constexpr coroutine_handle(nullptr_t) noexcept {} - _LIBCPP_HIDE_FROM_ABI static coroutine_handle from_promise(_Promise& __promise) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static coroutine_handle from_promise(_Promise& __promise) { using _RawPromise = __remove_cv_t<_Promise>; coroutine_handle __tmp; __tmp.__handle_ = @@ -114,9 +114,9 @@ struct coroutine_handle { } // [coroutine.handle.export.import], export/import - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } - _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { coroutine_handle __tmp; __tmp.__handle_ = __addr; return __tmp; @@ -130,7 +130,7 @@ struct coroutine_handle { // [coroutine.handle.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return __handle_ != nullptr; } - _LIBCPP_HIDE_FROM_ABI bool done() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool done() const { _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(__is_suspended(), "done() can be called only on suspended coroutines"); return __builtin_coro_done(__handle_); } @@ -150,7 +150,7 @@ struct coroutine_handle { } // [coroutine.handle.promise], promise access - _LIBCPP_HIDE_FROM_ABI _Promise& promise() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _Promise& promise() const { return *static_cast<_Promise*>(__builtin_coro_promise(this->__handle_, alignof(_Promise), false)); } @@ -165,7 +165,7 @@ struct coroutine_handle { // [coroutine.handle.hash] template struct hash> { - _LIBCPP_HIDE_FROM_ABI size_t operator()(const coroutine_handle<_Tp>& __v) const noexcept { + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI size_t operator()(const coroutine_handle<_Tp>& __v) const noexcept { return hash()(__v.address()); } }; diff --git a/libcxx/include/__coroutine/noop_coroutine_handle.h b/libcxx/include/__coroutine/noop_coroutine_handle.h index 692398a8a8431..b9c54d3b42bef 100644 --- a/libcxx/include/__coroutine/noop_coroutine_handle.h +++ b/libcxx/include/__coroutine/noop_coroutine_handle.h @@ -35,7 +35,7 @@ struct coroutine_handle { // [coroutine.handle.noop.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return true; } - _LIBCPP_HIDE_FROM_ABI constexpr bool done() const noexcept { return false; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool done() const noexcept { return false; } // [coroutine.handle.noop.resumption], resumption _LIBCPP_HIDE_FROM_ABI constexpr void operator()() const noexcept {} @@ -43,13 +43,13 @@ struct coroutine_handle { _LIBCPP_HIDE_FROM_ABI constexpr void destroy() const noexcept {} // [coroutine.handle.noop.promise], promise access - _LIBCPP_HIDE_FROM_ABI noop_coroutine_promise& promise() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI noop_coroutine_promise& promise() const noexcept { return *static_cast( __builtin_coro_promise(this->__handle_, alignof(noop_coroutine_promise), false)); } // [coroutine.handle.noop.address], address - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } private: _LIBCPP_HIDE_FROM_ABI friend coroutine_handle noop_coroutine() noexcept; @@ -86,7 +86,9 @@ inline noop_coroutine_handle::__noop_coroutine_frame_ty_ noop_coroutine_handle:: # endif // [coroutine.noop.coroutine] -inline _LIBCPP_HIDE_FROM_ABI noop_coroutine_handle noop_coroutine() noexcept { return noop_coroutine_handle(); } +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI noop_coroutine_handle noop_coroutine() noexcept { + return noop_coroutine_handle(); +} 
_LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__cxx03/__fwd/ios.h b/libcxx/include/__cxx03/__fwd/ios.h index dc03e8c6bab2f..3b33b25bfc1f9 100644 --- a/libcxx/include/__cxx03/__fwd/ios.h +++ b/libcxx/include/__cxx03/__fwd/ios.h @@ -31,7 +31,7 @@ using wios = basic_ios; template class _LIBCPP_PREFERRED_NAME(ios) _LIBCPP_IF_WIDE_CHARACTERS(_LIBCPP_PREFERRED_NAME(wios)) basic_ios; -#if defined(_NEWLIB_VERSION) +#if _LIBCPP_LIBC_NEWLIB // On newlib, off_t is 'long int' using streamoff = long int; // for char_traits in #else diff --git a/libcxx/include/__cxx03/__locale b/libcxx/include/__cxx03/__locale index 70dd1e65cfba9..e9cbc1f2d7683 100644 --- a/libcxx/include/__cxx03/__locale +++ b/libcxx/include/__cxx03/__locale @@ -384,7 +384,7 @@ public: static const mask xdigit = _ISXDIGIT; static const mask blank = _ISBLANK; static const mask __regex_word = 0x8000; -#elif defined(_NEWLIB_VERSION) +#elif _LIBCPP_LIBC_NEWLIB // Same type as Newlib's _ctype_ array in newlib/libc/include/ctype.h. 
typedef char mask; // In case char is signed, static_cast is needed to avoid warning on diff --git a/libcxx/include/__cxx03/__locale_dir/locale_base_api.h b/libcxx/include/__cxx03/__locale_dir/locale_base_api.h index a20f0952f52c3..3dbce825bc76c 100644 --- a/libcxx/include/__cxx03/__locale_dir/locale_base_api.h +++ b/libcxx/include/__cxx03/__locale_dir/locale_base_api.h @@ -17,7 +17,7 @@ # include <__cxx03/__locale_dir/locale_base_api/android.h> #elif defined(__sun__) # include <__cxx03/__locale_dir/locale_base_api/solaris.h> -#elif defined(_NEWLIB_VERSION) +#elif _LIBCPP_LIBC_NEWLIB # include <__cxx03/__locale_dir/locale_base_api/newlib.h> #elif defined(__OpenBSD__) # include <__cxx03/__locale_dir/locale_base_api/openbsd.h> diff --git a/libcxx/include/__cxx03/fstream b/libcxx/include/__cxx03/fstream index 65c2c3e975032..124619cafd3ee 100644 --- a/libcxx/include/__cxx03/fstream +++ b/libcxx/include/__cxx03/fstream @@ -210,7 +210,7 @@ typedef basic_fstream wfstream; _LIBCPP_PUSH_MACROS #include <__cxx03/__undef_macros> -#if defined(_LIBCPP_MSVCRT) || defined(_NEWLIB_VERSION) +#if defined(_LIBCPP_MSVCRT) || _LIBCPP_LIBC_NEWLIB # define _LIBCPP_HAS_NO_OFF_T_FUNCTIONS #endif diff --git a/libcxx/include/__cxx03/locale b/libcxx/include/__cxx03/locale index 79cd50e0e2419..4771539556ea3 100644 --- a/libcxx/include/__cxx03/locale +++ b/libcxx/include/__cxx03/locale @@ -220,7 +220,7 @@ template class messages_byname; # if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) // Most unix variants have catopen. These are the specific ones that don't. 
-# if !defined(__BIONIC__) && !defined(_NEWLIB_VERSION) && !defined(__EMSCRIPTEN__) +# if !defined(__BIONIC__) && !_LIBCPP_LIBC_NEWLIB && !defined(__EMSCRIPTEN__) # define _LIBCPP_HAS_CATOPEN 1 # include # endif diff --git a/libcxx/include/__cxx03/regex b/libcxx/include/__cxx03/regex index b6a78f27fbd37..bbd6eeee19ee9 100644 --- a/libcxx/include/__cxx03/regex +++ b/libcxx/include/__cxx03/regex @@ -984,7 +984,7 @@ public: typedef _CharT char_type; typedef basic_string string_type; typedef locale locale_type; -#if defined(__BIONIC__) || defined(_NEWLIB_VERSION) +#if defined(__BIONIC__) || _LIBCPP_LIBC_NEWLIB // Originally bionic's ctype_base used its own ctype masks because the // builtin ctype implementation wasn't in libc++ yet. Bionic's ctype mask // was only 8 bits wide and already saturated, so it used a wider type here @@ -993,9 +993,7 @@ public: // implementation, but this was not updated to match. Since then Android has // needed to maintain a stable libc++ ABI, and this can't be changed without // an ABI break. - // We also need this workaround for newlib since _NEWLIB_VERSION is not - // defined yet inside __config, so we can't set the - // _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE macro. Additionally, newlib is + // We also need this workaround for newlib since newlib is // often used for space constrained environments, so it makes sense not to // duplicate the ctype table. 
typedef uint16_t char_class_type; diff --git a/libcxx/include/__flat_map/flat_map.h b/libcxx/include/__flat_map/flat_map.h index 159e652e1a326..84b60cdc9ae27 100644 --- a/libcxx/include/__flat_map/flat_map.h +++ b/libcxx/include/__flat_map/flat_map.h @@ -409,41 +409,45 @@ class flat_map { } // iterators - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { return iterator(__containers_.keys.begin(), __containers_.values.begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { return const_iterator(__containers_.keys.begin(), __containers_.values.begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { return iterator(__containers_.keys.end(), __containers_.values.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { return const_iterator(__containers_.keys.end(), __containers_.values.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { return reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { return begin(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { return end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { + return begin(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { + return end(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { return const_reverse_iterator(begin()); } @@ -452,22 +456,22 @@ class flat_map { return __containers_.keys.empty(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { return __containers_.keys.size(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { return std::min(__containers_.keys.max_size(), __containers_.values.max_size()); } // [flat.map.access], element access - 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](const key_type& __x) + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](const key_type& __x) requires is_constructible_v { return try_emplace(__x).first->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](key_type&& __x) + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](key_type&& __x) requires is_constructible_v { return try_emplace(std::move(__x)).first->second; @@ -476,11 +480,11 @@ class flat_map { template requires(__is_compare_transparent && is_constructible_v && is_constructible_v && !is_convertible_v<_Kp &&, const_iterator> && !is_convertible_v<_Kp &&, iterator>) - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](_Kp&& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](_Kp&& __x) { return try_emplace(std::forward<_Kp>(__x)).first->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const key_type& __x) { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const key_type&): Key does not exist"); @@ -488,7 +492,7 @@ class flat_map { return __it->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const key_type& __x) const { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const key_type&) const: Key does not exist"); @@ -498,7 +502,7 @@ class flat_map { template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const _Kp& __x) { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const K&): Key does not exist"); @@ -508,7 +512,7 @@ class flat_map { template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const _Kp& __x) const { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const K&) const: Key does not exist"); @@ -596,7 +600,7 @@ class flat_map { insert(sorted_unique, __il.begin(), __il.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 containers extract() && { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 containers extract() && { auto __guard = std::__make_scope_guard([&]() noexcept { clear() /* noexcept */; }); auto __ret = std::move(__containers_); return __ret; @@ -753,116 +757,121 @@ class flat_map { } // observers - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { return value_compare(__compare_); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const key_container_type& keys() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const key_container_type& keys() const noexcept { return __containers_.keys; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_container_type& values() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_container_type& + values() const noexcept { return 
__containers_.values; } // map operations - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { return __find_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { return __find_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { return __find_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const _Kp& __x) const { return __find_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { return contains(__x) ? 1 : 0; } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { return contains(__x) ? 
1 : 0; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { return find(__x) != end(); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { return find(__x) != end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { return __lower_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + lower_bound(const key_type& __x) const { return __lower_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { return __lower_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { return __lower_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { return __upper_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const key_type& __x) const { + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + upper_bound(const key_type& __x) const { return __upper_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { return __upper_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { return __upper_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const key_type& __x) { return __equal_range_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) const { return __equal_range_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const _Kp& __x) { return __equal_range_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) const { return __equal_range_impl(*this, __x); } diff --git a/libcxx/include/__flat_map/utils.h b/libcxx/include/__flat_map/utils.h index 3a05c715660dc..4b07e388d0255 100644 --- a/libcxx/include/__flat_map/utils.h +++ b/libcxx/include/__flat_map/utils.h @@ -16,6 +16,7 @@ #include <__utility/exception_guard.h> #include 
<__utility/forward.h> #include <__utility/move.h> +#include <__vector/container_traits.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/libcxx/include/__flat_set/flat_set.h b/libcxx/include/__flat_set/flat_set.h index 0c8fdb5a803c8..1be38f10ea9f3 100644 --- a/libcxx/include/__flat_set/flat_set.h +++ b/libcxx/include/__flat_set/flat_set.h @@ -339,38 +339,42 @@ class flat_set { } // iterators - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { return iterator(std::as_const(__keys_).begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { return const_iterator(__keys_.begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { return iterator(std::as_const(__keys_).end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { return const_iterator(__keys_.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { return reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { return begin(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { return end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { + return begin(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { + return end(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { return const_reverse_iterator(begin()); } @@ -379,9 +383,13 @@ class flat_set { return __keys_.empty(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { return __keys_.size(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { + return __keys_.size(); + } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { return __keys_.max_size(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { + return __keys_.max_size(); + } // [flat.set.modifiers], modifiers template @@ -466,7 +474,7 @@ class 
flat_set { insert(sorted_unique, __il.begin(), __il.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 container_type extract() && { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 container_type extract() && { auto __guard = std::__make_scope_guard([&]() noexcept { clear() /* noexcept */; }); auto __ret = std::move(__keys_); return __ret; @@ -528,111 +536,117 @@ class flat_set { _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void clear() noexcept { __keys_.clear(); } // observers - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { return __compare_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { + return __compare_; + } // set operations - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { return __find_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { return __find_impl(*this, __x); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { return __find_impl(*this, __x); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const 
_Kp& __x) const { return __find_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { return contains(__x) ? 1 : 0; } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { return contains(__x) ? 1 : 0; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { return find(__x) != end(); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { return find(__x) != end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { const auto& __keys = __keys_; return iterator(std::lower_bound(__keys.begin(), __keys.end(), __x, __compare_)); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + lower_bound(const key_type& __x) const { return const_iterator(std::lower_bound(__keys_.begin(), __keys_.end(), __x, __compare_)); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { const auto& 
__keys = __keys_; return iterator(std::lower_bound(__keys.begin(), __keys.end(), __x, __compare_)); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { return const_iterator(std::lower_bound(__keys_.begin(), __keys_.end(), __x, __compare_)); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { const auto& __keys = __keys_; return iterator(std::upper_bound(__keys.begin(), __keys.end(), __x, __compare_)); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + upper_bound(const key_type& __x) const { return const_iterator(std::upper_bound(__keys_.begin(), __keys_.end(), __x, __compare_)); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { const auto& __keys = __keys_; return iterator(std::upper_bound(__keys.begin(), __keys.end(), __x, __compare_)); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { return const_iterator(std::upper_bound(__keys_.begin(), __keys_.end(), __x, __compare_)); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const key_type& __x) { return __equal_range_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) const { return __equal_range_impl(*this, __x); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const _Kp& __x) { return __equal_range_impl(*this, __x); } template requires __is_transparent_v<_Compare> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) const { return __equal_range_impl(*this, __x); } diff --git a/libcxx/include/__functional/weak_result_type.h b/libcxx/include/__functional/weak_result_type.h index aa462e4d5c56f..4232bdc69dd00 100644 --- a/libcxx/include/__functional/weak_result_type.h +++ b/libcxx/include/__functional/weak_result_type.h @@ -13,9 +13,9 @@ #include <__config> #include <__functional/binary_function.h> #include <__functional/unary_function.h> -#include <__type_traits/integral_constant.h> #include <__type_traits/invoke.h> #include <__type_traits/is_same.h> +#include <__type_traits/void_t.h> #include <__utility/declval.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -24,50 +24,36 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -struct __has_result_type { -private: - template - static false_type __test(...); - template - static true_type __test(typename _Up::result_type* = 0); +template +inline const bool __has_result_type_v = false; -public: - static const bool value = decltype(__test<_Tp>(0))::value; -}; +template +inline const bool __has_result_type_v<_Tp, __void_t > = true; // __weak_result_type template struct __derives_from_unary_function { private: - struct __two { - char __lx; - 
char __lxx; - }; - static __two __test(...); + static void __find_base(...); template - static __unary_function<_Ap, _Rp> __test(const volatile __unary_function<_Ap, _Rp>*); + static __unary_function<_Ap, _Rp> __find_base(const volatile __unary_function<_Ap, _Rp>*); public: - static const bool value = !is_same::value; - typedef decltype(__test((_Tp*)0)) type; + using type = decltype(__find_base(static_cast<_Tp*>(nullptr))); + static const bool value = !is_same::value; }; template struct __derives_from_binary_function { private: - struct __two { - char __lx; - char __lxx; - }; - static __two __test(...); + static void __find_base(...); template - static __binary_function<_A1, _A2, _Rp> __test(const volatile __binary_function<_A1, _A2, _Rp>*); + static __binary_function<_A1, _A2, _Rp> __find_base(const volatile __binary_function<_A1, _A2, _Rp>*); public: - static const bool value = !is_same::value; - typedef decltype(__test((_Tp*)0)) type; + using type = decltype(__find_base(static_cast<_Tp*>(nullptr))); + static const bool value = !is_same::value; }; template ::value> @@ -85,7 +71,7 @@ struct __maybe_derive_from_binary_function // bool is true template struct __maybe_derive_from_binary_function<_Tp, false> {}; -template ::value> +template > struct __weak_result_type_imp // bool is true : public __maybe_derive_from_unary_function<_Tp>, public __maybe_derive_from_binary_function<_Tp> { diff --git a/libcxx/include/__fwd/ios.h b/libcxx/include/__fwd/ios.h index 831624f4b1c57..fd6738a6b3ae4 100644 --- a/libcxx/include/__fwd/ios.h +++ b/libcxx/include/__fwd/ios.h @@ -31,7 +31,7 @@ using wios = basic_ios; template class _LIBCPP_PREFERRED_NAME(ios) _LIBCPP_IF_WIDE_CHARACTERS(_LIBCPP_PREFERRED_NAME(wios)) basic_ios; -#if defined(_NEWLIB_VERSION) +#if _LIBCPP_LIBC_NEWLIB // On newlib, off_t is 'long int' using streamoff = long int; // for char_traits in #else diff --git a/libcxx/include/__locale b/libcxx/include/__locale index 0948bd29b6f1b..c2602af5f4107 100644 --- 
a/libcxx/include/__locale +++ b/libcxx/include/__locale @@ -388,7 +388,7 @@ public: static const mask xdigit = _ISXDIGIT; static const mask blank = _ISBLANK; static const mask __regex_word = 0x8000; -# elif defined(_NEWLIB_VERSION) +# elif _LIBCPP_LIBC_NEWLIB // Same type as Newlib's _ctype_ array in newlib/libc/include/ctype.h. typedef char mask; // In case char is signed, static_cast is needed to avoid warning on diff --git a/libcxx/include/__locale_dir/messages.h b/libcxx/include/__locale_dir/messages.h index c04bf04025ff0..686f472840c22 100644 --- a/libcxx/include/__locale_dir/messages.h +++ b/libcxx/include/__locale_dir/messages.h @@ -22,7 +22,7 @@ # if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) // Most unix variants have catopen. These are the specific ones that don't. -# if !defined(__BIONIC__) && !defined(_NEWLIB_VERSION) && !defined(__EMSCRIPTEN__) +# if !defined(__BIONIC__) && !_LIBCPP_LIBC_NEWLIB && !defined(__EMSCRIPTEN__) # define _LIBCPP_HAS_CATOPEN 1 # include # else diff --git a/libcxx/include/__mdspan/extents.h b/libcxx/include/__mdspan/extents.h index 26219557dbae9..d16bbd2af44f1 100644 --- a/libcxx/include/__mdspan/extents.h +++ b/libcxx/include/__mdspan/extents.h @@ -299,11 +299,13 @@ class extents { public: // [mdspan.extents.obs], observers of multidimensional index space - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return __rank_; } - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return __rank_dynamic_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return __rank_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return __rank_dynamic_; } - _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { return __vals_.__value(__r); } - _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr 
index_type extent(rank_type __r) const noexcept { + return __vals_.__value(__r); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { return _Values::__static_value(__r); } diff --git a/libcxx/include/__mdspan/mdspan.h b/libcxx/include/__mdspan/mdspan.h index c0f27678197ce..9f3139a874ff9 100644 --- a/libcxx/include/__mdspan/mdspan.h +++ b/libcxx/include/__mdspan/mdspan.h @@ -87,12 +87,14 @@ class mdspan { using data_handle_type = typename accessor_type::data_handle_type; using reference = typename accessor_type::reference; - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); } - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); } - _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { + return extents_type::rank_dynamic(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { return extents_type::static_extent(__r); } - _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { return __map_.extents().extent(__r); }; @@ -185,7 +187,7 @@ class mdspan { requires((is_convertible_v<_OtherIndexTypes, index_type> && ...) && (is_nothrow_constructible_v && ...) && (sizeof...(_OtherIndexTypes) == rank())) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... 
__indices) const { // Note the standard layouts would also check this, but user provided ones may not, so we // check the precondition here _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__is_multidimensional_index_in(extents(), __indices...), @@ -196,7 +198,8 @@ class mdspan { template requires(is_convertible_v && is_nothrow_constructible_v) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](const array< _OtherIndexType, rank()>& __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference + operator[](const array< _OtherIndexType, rank()>& __indices) const { return __acc_.access(__ptr_, [&](index_sequence<_Idxs...>) { return __map_(__indices[_Idxs]...); }(make_index_sequence())); @@ -205,7 +208,7 @@ class mdspan { template requires(is_convertible_v && is_nothrow_constructible_v) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const { return __acc_.access(__ptr_, [&](index_sequence<_Idxs...>) { return __map_(__indices[_Idxs]...); }(make_index_sequence())); @@ -237,24 +240,28 @@ class mdspan { swap(__x.__acc_, __y.__acc_); } - _LIBCPP_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { return __map_.extents(); }; - _LIBCPP_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { return __ptr_; }; - _LIBCPP_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { return __map_; }; - _LIBCPP_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { return __acc_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { + return __map_.extents(); + }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { return __ptr_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { return 
__map_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { return __acc_; }; // per LWG-4021 "mdspan::is_always_meow() should be noexcept" - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept { return mapping_type::is_always_unique(); }; - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept { + return mapping_type::is_always_unique(); + }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept { return mapping_type::is_always_exhaustive(); }; - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept { return mapping_type::is_always_strided(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_unique() const { return __map_.is_unique(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_exhaustive() const { return __map_.is_exhaustive(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_strided() const { return __map_.is_strided(); }; - _LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { return __map_.stride(__r); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_unique() const { return __map_.is_unique(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_exhaustive() const { return __map_.is_exhaustive(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_strided() const { return __map_.is_strided(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { return __map_.stride(__r); }; private: _LIBCPP_NO_UNIQUE_ADDRESS data_handle_type __ptr_{}; diff --git a/libcxx/include/__mutex/mutex.h b/libcxx/include/__mutex/mutex.h index 68c8842b35eda..e9cedf8db1cca 100644 --- a/libcxx/include/__mutex/mutex.h +++ b/libcxx/include/__mutex/mutex.h @@ -37,11 +37,11 @@ class 
_LIBCPP_EXPORTED_FROM_ABI _LIBCPP_CAPABILITY("mutex") mutex { # endif _LIBCPP_ACQUIRE_CAPABILITY() void lock(); - _LIBCPP_TRY_ACQUIRE_CAPABILITY(true) bool try_lock() _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_TRY_ACQUIRE_CAPABILITY(true) bool try_lock() _NOEXCEPT; _LIBCPP_RELEASE_CAPABILITY void unlock() _NOEXCEPT; typedef __libcpp_mutex_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } }; static_assert(is_nothrow_default_constructible::value, "the default constructor for std::mutex must be nothrow"); diff --git a/libcxx/include/__thread/thread.h b/libcxx/include/__thread/thread.h index a3b672bc0f0e7..561f092ddb7c0 100644 --- a/libcxx/include/__thread/thread.h +++ b/libcxx/include/__thread/thread.h @@ -242,13 +242,13 @@ class _LIBCPP_EXPORTED_FROM_ABI thread { _LIBCPP_HIDE_FROM_ABI void swap(thread& __t) _NOEXCEPT { std::swap(__t_, __t.__t_); } - _LIBCPP_HIDE_FROM_ABI bool joinable() const _NOEXCEPT { return !__libcpp_thread_isnull(&__t_); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool joinable() const _NOEXCEPT { return !__libcpp_thread_isnull(&__t_); } void join(); void detach(); - _LIBCPP_HIDE_FROM_ABI id get_id() const _NOEXCEPT { return __libcpp_thread_get_id(&__t_); } - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() _NOEXCEPT { return __t_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI id get_id() const _NOEXCEPT { return __libcpp_thread_get_id(&__t_); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() _NOEXCEPT { return __t_; } - static unsigned hardware_concurrency() _NOEXCEPT; + [[__nodiscard__]] static unsigned hardware_concurrency() _NOEXCEPT; }; inline _LIBCPP_HIDE_FROM_ABI void swap(thread& __x, thread& __y) _NOEXCEPT { __x.swap(__y); } diff --git a/libcxx/include/__utility/cmp.h b/libcxx/include/__utility/cmp.h index 68864e23e0397..7cfe640ceb423 100644 --- 
a/libcxx/include/__utility/cmp.h +++ b/libcxx/include/__utility/cmp.h @@ -31,7 +31,7 @@ concept __comparison_can_promote_to = sizeof(_Tp) < sizeof(_Ip) || (sizeof(_Tp) == sizeof(_Ip) && __signed_integer<_Tp>); template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { if constexpr (is_signed_v<_Tp> == is_signed_v<_Up>) return __t == __u; else if constexpr (__comparison_can_promote_to<_Tp, int> && __comparison_can_promote_to<_Up, int>) @@ -45,12 +45,12 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_not_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_not_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_equal(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { if constexpr (is_signed_v<_Tp> == is_signed_v<_Up>) return __t < __u; else if constexpr (__comparison_can_promote_to<_Tp, int> && __comparison_can_promote_to<_Up, int>) @@ -64,22 +64,22 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater(_Tp __t, _Up __u) noexcept { return std::cmp_less(__u, __t); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_greater(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_less(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool in_range(_Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool in_range(_Up __u) noexcept { return std::cmp_less_equal(__u, numeric_limits<_Tp>::max()) && std::cmp_greater_equal(__u, numeric_limits<_Tp>::min()); } diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h index 7051e044314ea..4961a5fcb2067 100644 --- a/libcxx/include/__vector/vector.h +++ b/libcxx/include/__vector/vector.h @@ -12,11 +12,11 @@ #include <__algorithm/copy.h> #include <__algorithm/copy_n.h> #include <__algorithm/fill_n.h> +#include <__algorithm/iterator_operations.h> #include <__algorithm/max.h> #include <__algorithm/min.h> #include <__algorithm/move.h> #include <__algorithm/move_backward.h> -#include <__algorithm/ranges_copy_n.h> #include <__algorithm/rotate.h> #include <__assert> #include <__config> @@ -314,7 +314,7 @@ class vector { is_constructible::reference>::value, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void assign(_ForwardIterator __first, _ForwardIterator __last) { - __assign_with_size(__first, __last, std::distance(__first, __last)); + __assign_with_size<_ClassicAlgPolicy>(__first, __last, std::distance(__first, __last)); } #if _LIBCPP_STD_VER >= 23 @@ -322,7 +322,7 @@ class vector { _LIBCPP_HIDE_FROM_ABI constexpr void assign_range(_Range&& __range) { if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) { auto __n = static_cast(ranges::distance(__range)); - 
__assign_with_size(ranges::begin(__range), ranges::end(__range), __n); + __assign_with_size<_RangeAlgPolicy>(ranges::begin(__range), ranges::end(__range), __n); } else { __assign_with_sentinel(ranges::begin(__range), ranges::end(__range)); @@ -518,7 +518,7 @@ class vector { int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator insert(const_iterator __position, _ForwardIterator __first, _ForwardIterator __last) { - return __insert_with_size(__position, __first, __last, std::distance(__first, __last)); + return __insert_with_size<_ClassicAlgPolicy>(__position, __first, __last, std::distance(__first, __last)); } #if _LIBCPP_STD_VER >= 23 @@ -526,7 +526,7 @@ class vector { _LIBCPP_HIDE_FROM_ABI constexpr iterator insert_range(const_iterator __position, _Range&& __range) { if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) { auto __n = static_cast(ranges::distance(__range)); - return __insert_with_size(__position, ranges::begin(__range), ranges::end(__range), __n); + return __insert_with_size<_RangeAlgPolicy>(__position, ranges::begin(__range), ranges::end(__range), __n); } else { return __insert_with_sentinel(__position, ranges::begin(__range), ranges::end(__range)); @@ -619,12 +619,13 @@ class vector { // The `_Iterator` in `*_with_size` functions can be input-only only if called from `*_range` (since C++23). // Otherwise, `_Iterator` is a forward iterator. 
- template + template _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __assign_with_size(_Iterator __first, _Sentinel __last, difference_type __n); - template ())&&, value_type&&>::value, int> = 0> + template , value_type>::value, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __insert_assign_n_unchecked(_Iterator __first, difference_type __n, pointer __position) { for (pointer __end_position = __position + __n; __position != __end_position; ++__position, (void)++__first) { @@ -633,25 +634,19 @@ class vector { } } - template ())&&, value_type&&>::value, int> = 0> + template , value_type>::value, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __insert_assign_n_unchecked(_Iterator __first, difference_type __n, pointer __position) { -#if _LIBCPP_STD_VER >= 23 - if constexpr (!forward_iterator<_Iterator>) { // Handles input-only sized ranges for insert_range - ranges::copy_n(std::move(__first), __n, __position); - } else -#endif - { - std::copy_n(__first, __n, __position); - } + std::__copy_n<_AlgPolicy>(std::move(__first), __n, __position); } template _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator __insert_with_sentinel(const_iterator __position, _InputIterator __first, _Sentinel __last); - template + template _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator __insert_with_size(const_iterator __position, _Iterator __first, _Sentinel __last, difference_type __n); @@ -1039,20 +1034,14 @@ vector<_Tp, _Allocator>::__assign_with_sentinel(_Iterator __first, _Sentinel __l } template -template +template _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void vector<_Tp, _Allocator>::__assign_with_size(_Iterator __first, _Sentinel __last, difference_type __n) { size_type __new_size = static_cast(__n); if (__new_size <= capacity()) { if (__new_size > size()) { -#if _LIBCPP_STD_VER >= 23 - auto __mid = ranges::copy_n(std::move(__first), size(), this->__begin_).in; + auto __mid = 
std::__copy_n<_AlgPolicy>(std::move(__first), size(), this->__begin_).first; __construct_at_end(std::move(__mid), std::move(__last), __new_size - size()); -#else - _Iterator __mid = std::next(__first, size()); - std::copy(__first, __mid, this->__begin_); - __construct_at_end(__mid, __last, __new_size - size()); -#endif } else { pointer __m = std::__copy(std::move(__first), __last, this->__begin_).second; this->__destruct_at_end(__m); @@ -1326,7 +1315,7 @@ vector<_Tp, _Allocator>::__insert_with_sentinel(const_iterator __position, _Inpu } template -template +template _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI typename vector<_Tp, _Allocator>::iterator vector<_Tp, _Allocator>::__insert_with_size( const_iterator __position, _Iterator __first, _Sentinel __last, difference_type __n) { @@ -1347,12 +1336,12 @@ vector<_Tp, _Allocator>::__insert_with_size( __construct_at_end(__m, __last, __n - __dx); if (__dx > 0) { __move_range(__p, __old_last, __p + __n); - __insert_assign_n_unchecked(__first, __dx, __p); + __insert_assign_n_unchecked<_AlgPolicy>(__first, __dx, __p); } } } else { __move_range(__p, __old_last, __p + __n); - __insert_assign_n_unchecked(std::move(__first), __n, __p); + __insert_assign_n_unchecked<_AlgPolicy>(std::move(__first), __n, __p); } } else { __split_buffer __v(__recommend(size() + __n), __p - this->__begin_, this->__alloc_); diff --git a/libcxx/include/barrier b/libcxx/include/barrier index 41fbfb3e8fb7b..5f9b471f01741 100644 --- a/libcxx/include/barrier +++ b/libcxx/include/barrier @@ -158,7 +158,9 @@ class barrier { public: using arrival_token = typename __barrier_base<_CompletionF>::arrival_token; - static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { return __barrier_base<_CompletionF>::max(); } + [[nodiscard]] static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { + return __barrier_base<_CompletionF>::max(); + } _LIBCPP_HIDE_FROM_ABI explicit barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF()) 
: __b_(__count, std::move(__completion)) { diff --git a/libcxx/include/deque b/libcxx/include/deque index 08bf8141eb782..ad2d759e1fcac 100644 --- a/libcxx/include/deque +++ b/libcxx/include/deque @@ -715,45 +715,53 @@ public: // iterators: - _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { __map_pointer __mp = __map_.begin() + __start_ / __block_size; return iterator(__mp, __map_.empty() ? 0 : *__mp + __start_ % __block_size); } - _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { __map_const_pointer __mp = static_cast<__map_const_pointer>(__map_.begin() + __start_ / __block_size); return const_iterator(__mp, __map_.empty() ? 0 : *__mp + __start_ % __block_size); } - _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { size_type __p = size() + __start_; __map_pointer __mp = __map_.begin() + __p / __block_size; return iterator(__mp, __map_.empty() ? 0 : *__mp + __p % __block_size); } - _LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { size_type __p = size() + __start_; __map_const_pointer __mp = static_cast<__map_const_pointer>(__map_.begin() + __p / __block_size); return const_iterator(__mp, __map_.empty() ? 
0 : *__mp + __p % __block_size); } - _LIBCPP_HIDE_FROM_ABI reverse_iterator rbegin() _NOEXCEPT { return reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI const_reverse_iterator rbegin() const _NOEXCEPT { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI reverse_iterator rend() _NOEXCEPT { return reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI const_reverse_iterator rend() const _NOEXCEPT { return const_reverse_iterator(begin()); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reverse_iterator rbegin() _NOEXCEPT { return reverse_iterator(end()); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reverse_iterator rbegin() const _NOEXCEPT { + return const_reverse_iterator(end()); + } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reverse_iterator rend() _NOEXCEPT { return reverse_iterator(begin()); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reverse_iterator rend() const _NOEXCEPT { + return const_reverse_iterator(begin()); + } - _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const _NOEXCEPT { return begin(); } - _LIBCPP_HIDE_FROM_ABI const_iterator cend() const _NOEXCEPT { return end(); } - _LIBCPP_HIDE_FROM_ABI const_reverse_iterator crbegin() const _NOEXCEPT { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI const_reverse_iterator crend() const _NOEXCEPT { return const_reverse_iterator(begin()); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const _NOEXCEPT { return begin(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_iterator cend() const _NOEXCEPT { return end(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reverse_iterator crbegin() const _NOEXCEPT { + return const_reverse_iterator(end()); + } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reverse_iterator crend() const _NOEXCEPT { + return const_reverse_iterator(begin()); + } // capacity: - _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __size(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return 
__size(); } _LIBCPP_HIDE_FROM_ABI size_type& __size() _NOEXCEPT { return __size_; } _LIBCPP_HIDE_FROM_ABI const size_type& __size() const _NOEXCEPT { return __size_; } - _LIBCPP_HIDE_FROM_ABI size_type max_size() const _NOEXCEPT { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI size_type max_size() const _NOEXCEPT { return std::min(__alloc_traits::max_size(__alloc()), numeric_limits::max()); } _LIBCPP_HIDE_FROM_ABI void resize(size_type __n); @@ -762,14 +770,14 @@ public: [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const _NOEXCEPT { return size() == 0; } // element access: - _LIBCPP_HIDE_FROM_ABI reference operator[](size_type __i) _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI const_reference operator[](size_type __i) const _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI reference at(size_type __i); - _LIBCPP_HIDE_FROM_ABI const_reference at(size_type __i) const; - _LIBCPP_HIDE_FROM_ABI reference front() _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI const_reference front() const _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI reference back() _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI const_reference back() const _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference operator[](size_type __i) _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference operator[](size_type __i) const _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference at(size_type __i); + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference at(size_type __i) const; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference front() _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference front() const _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference back() _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference back() const _NOEXCEPT; // 23.2.2.3 modifiers: _LIBCPP_HIDE_FROM_ABI void push_front(const value_type& __v); diff --git a/libcxx/include/fstream b/libcxx/include/fstream index 1f88d134fe061..fbe579ada41b7 100644 --- a/libcxx/include/fstream +++ b/libcxx/include/fstream @@ -986,7 
+986,7 @@ template int basic_filebuf<_CharT, _Traits>::__fseek(FILE* __file, pos_type __offset, int __whence) { # if defined(_LIBCPP_MSVCRT_LIKE) return _fseeki64(__file, __offset, __whence); -# elif defined(_NEWLIB_VERSION) +# elif _LIBCPP_LIBC_NEWLIB return fseek(__file, __offset, __whence); # else return ::fseeko(__file, __offset, __whence); @@ -997,7 +997,7 @@ template typename basic_filebuf<_CharT, _Traits>::pos_type basic_filebuf<_CharT, _Traits>::__ftell(FILE* __file) { # if defined(_LIBCPP_MSVCRT_LIKE) return _ftelli64(__file); -# elif defined(_NEWLIB_VERSION) +# elif _LIBCPP_LIBC_NEWLIB return ftell(__file); # else return ftello(__file); diff --git a/libcxx/include/initializer_list b/libcxx/include/initializer_list index 00e0d4ea4a2df..44cd45668388b 100644 --- a/libcxx/include/initializer_list +++ b/libcxx/include/initializer_list @@ -78,11 +78,17 @@ public: _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 initializer_list() _NOEXCEPT : __begin_(nullptr), __size_(0) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 size_t size() const _NOEXCEPT { return __size_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 size_t size() const _NOEXCEPT { + return __size_; + } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* begin() const _NOEXCEPT { return __begin_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* begin() const _NOEXCEPT { + return __begin_; + } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* end() const _NOEXCEPT { return __begin_ + __size_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* end() const _NOEXCEPT { + return __begin_ + __size_; + } }; template diff --git a/libcxx/include/latch b/libcxx/include/latch index c3b8f62e9b50e..33268d9655f25 100644 --- a/libcxx/include/latch +++ b/libcxx/include/latch @@ -70,7 +70,9 @@ class latch { atomic __a_; public: - static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t 
max() noexcept { return numeric_limits::max(); } + [[nodiscard]] static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { + return numeric_limits::max(); + } inline _LIBCPP_HIDE_FROM_ABI constexpr explicit latch(ptrdiff_t __expected) : __a_(__expected) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( @@ -97,7 +99,7 @@ public: if (__old == __update) __a_.notify_all(); } - inline _LIBCPP_HIDE_FROM_ABI bool try_wait() const noexcept { + [[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI bool try_wait() const noexcept { auto __value = __a_.load(memory_order_acquire); return try_wait_impl(__value); } diff --git a/libcxx/include/mutex b/libcxx/include/mutex index 0b81f1bb1c8a6..bec0185ede21a 100644 --- a/libcxx/include/mutex +++ b/libcxx/include/mutex @@ -229,12 +229,12 @@ public: recursive_mutex& operator=(const recursive_mutex&) = delete; void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; void unlock() _NOEXCEPT; typedef __libcpp_recursive_mutex_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } }; class _LIBCPP_EXPORTED_FROM_ABI timed_mutex { @@ -251,14 +251,14 @@ public: public: void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; template - _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { return try_lock_until(chrono::steady_clock::now() + __d); } template - _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { using namespace chrono; unique_lock __lk(__m_); bool __no_timeout = _Clock::now() < __t; @@ -288,14 +288,14 @@ public: recursive_timed_mutex& operator=(const 
recursive_timed_mutex&) = delete; void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; template - _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { return try_lock_until(chrono::steady_clock::now() + __d); } template - _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { using namespace chrono; __thread_id __id = this_thread::get_id(); unique_lock __lk(__m_); @@ -320,7 +320,7 @@ public: }; template -_LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1) { +[[__nodiscard__]] _LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1) { unique_lock<_L0> __u0(__l0, try_to_lock_t()); if (__u0.owns_lock()) { if (__l1.try_lock()) { @@ -335,7 +335,8 @@ _LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, # ifndef _LIBCPP_CXX03_LANG template -_LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3&... __l3) { +[[__nodiscard__]] _LIBCPP_NO_THREAD_SAFETY_ANALYSIS + _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3&... 
__l3) { int __r = 0; unique_lock<_L0> __u0(__l0, try_to_lock); if (__u0.owns_lock()) { diff --git a/libcxx/include/queue b/libcxx/include/queue index b4b79fb25a35f..a1686bc7c502e 100644 --- a/libcxx/include/queue +++ b/libcxx/include/queue @@ -376,12 +376,12 @@ public: # endif // _LIBCPP_CXX03_LANG [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const { return c.empty(); } - _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } - _LIBCPP_HIDE_FROM_ABI reference front() { return c.front(); } - _LIBCPP_HIDE_FROM_ABI const_reference front() const { return c.front(); } - _LIBCPP_HIDE_FROM_ABI reference back() { return c.back(); } - _LIBCPP_HIDE_FROM_ABI const_reference back() const { return c.back(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference front() { return c.front(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference front() const { return c.front(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference back() { return c.back(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference back() const { return c.back(); } _LIBCPP_HIDE_FROM_ABI void push(const value_type& __v) { c.push_back(__v); } # ifndef _LIBCPP_CXX03_LANG @@ -664,8 +664,10 @@ public: # endif [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI bool empty() const { return c.empty(); } - _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } - _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI const_reference top() const { return c.front(); } + [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } + [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI const_reference top() const { + return c.front(); + } _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI void push(const value_type& __v); # ifndef _LIBCPP_CXX03_LANG diff --git 
a/libcxx/include/regex b/libcxx/include/regex index 9bbc3a69021b9..b6c19518be301 100644 --- a/libcxx/include/regex +++ b/libcxx/include/regex @@ -1004,7 +1004,7 @@ public: typedef _CharT char_type; typedef basic_string string_type; typedef locale locale_type; -# if defined(__BIONIC__) || defined(_NEWLIB_VERSION) +# if defined(__BIONIC__) || _LIBCPP_LIBC_NEWLIB // Originally bionic's ctype_base used its own ctype masks because the // builtin ctype implementation wasn't in libc++ yet. Bionic's ctype mask // was only 8 bits wide and already saturated, so it used a wider type here @@ -1013,9 +1013,7 @@ public: // implementation, but this was not updated to match. Since then Android has // needed to maintain a stable libc++ ABI, and this can't be changed without // an ABI break. - // We also need this workaround for newlib since _NEWLIB_VERSION is not - // defined yet inside __config, so we can't set the - // _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE macro. Additionally, newlib is + // We also need this workaround for newlib since newlib is // often used for space constrained environments, so it makes sense not to // duplicate the ctype table. 
typedef uint16_t char_class_type; diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore index 99c4ad24b35ec..1f19d50e32af7 100644 --- a/libcxx/include/semaphore +++ b/libcxx/include/semaphore @@ -133,7 +133,7 @@ class counting_semaphore { public: static_assert(__least_max_value >= 0, "The least maximum value must be a positive number"); - static constexpr ptrdiff_t max() noexcept { return __least_max_value; } + [[nodiscard]] static constexpr ptrdiff_t max() noexcept { return __least_max_value; } _LIBCPP_HIDE_FROM_ABI constexpr explicit counting_semaphore(ptrdiff_t __count) : __semaphore_(__count) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( @@ -156,12 +156,12 @@ public: } _LIBCPP_HIDE_FROM_ABI void acquire() { __semaphore_.acquire(); } template - _LIBCPP_HIDE_FROM_ABI bool try_acquire_for(chrono::duration<_Rep, _Period> const& __rel_time) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire_for(chrono::duration<_Rep, _Period> const& __rel_time) { return __semaphore_.try_acquire_for(chrono::duration_cast(__rel_time)); } - _LIBCPP_HIDE_FROM_ABI bool try_acquire() { return __semaphore_.try_acquire(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire() { return __semaphore_.try_acquire(); } template - _LIBCPP_HIDE_FROM_ABI bool try_acquire_until(chrono::time_point<_Clock, _Duration> const& __abs_time) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire_until(chrono::time_point<_Clock, _Duration> const& __abs_time) { auto const __current = _Clock::now(); if (__current >= __abs_time) return try_acquire(); diff --git a/libcxx/src/include/config_elast.h b/libcxx/src/include/config_elast.h index 7edff2d9375d4..be665a97bf91b 100644 --- a/libcxx/src/include/config_elast.h +++ b/libcxx/src/include/config_elast.h @@ -23,7 +23,7 @@ # define _LIBCPP_ELAST ELAST #elif defined(__LLVM_LIBC__) // No _LIBCPP_ELAST needed for LLVM libc -#elif defined(_NEWLIB_VERSION) +#elif _LIBCPP_LIBC_NEWLIB # define _LIBCPP_ELAST __ELASTERROR #elif defined(__NuttX__) // No 
_LIBCPP_ELAST needed on NuttX diff --git a/libcxx/src/locale.cpp b/libcxx/src/locale.cpp index 2081e75fdf64b..aca21173dfe02 100644 --- a/libcxx/src/locale.cpp +++ b/libcxx/src/locale.cpp @@ -919,7 +919,7 @@ const ctype::mask* ctype::classic_table() noexcept { return __pctype_func(); # elif defined(__EMSCRIPTEN__) return *__ctype_b_loc(); -# elif defined(_NEWLIB_VERSION) +# elif _LIBCPP_LIBC_NEWLIB // Newlib has a 257-entry table in ctype_.c, where (char)0 starts at [1]. return _ctype_ + 1; # elif defined(_AIX) diff --git a/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp b/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp index afea31fb59e95..7780b5a92a6c4 100644 --- a/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp +++ b/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -83,6 +84,20 @@ int main(int argc, char** argv) { bm.template operator()>("rng::find_if_not(list) (" + comment + ")", ranges_find_if_not); }; + auto register_nested_container_benchmarks = [&](auto bm, std::string comment) { + // ranges_find + bm.template operator()>>( + "rng::find(join_view(vector>)) (" + comment + ")", ranges_find); + bm.template operator()>>( + "rng::find(join_view(vector>)) (" + comment + ")", ranges_find); + bm.template operator()>>( + "rng::find(join_view(list>)) (" + comment + ")", ranges_find); + bm.template operator()>>( + "rng::find(join_view(vector>)) (" + comment + ")", ranges_find); + bm.template operator()>>( + "rng::find(join_view(deque>)) (" + comment + ")", ranges_find); + }; + // Benchmark {std,ranges}::{find,find_if,find_if_not}(normal container) where we // bail out after 25% of elements { @@ -142,6 +157,44 @@ int main(int argc, char** argv) { register_benchmarks(bm, "process all"); } + // Benchmark {std,ranges}::{find,find_if,find_if_not}(join(normal container)) where we process the whole sequence + { + auto bm = [](std::string name, 
auto find) { + benchmark::RegisterBenchmark( + name, + [find](auto& st) { + std::size_t const size = st.range(0); + std::size_t const seg_size = 256; + std::size_t const segments = (size + seg_size - 1) / seg_size; + using C1 = typename Container::value_type; + using ValueType = typename C1::value_type; + ValueType x = Generate::random(); + ValueType y = random_different_from({x}); + Container c(segments); + auto n = size; + for (auto it = c.begin(); it != c.end(); it++) { + it->resize(std::min(seg_size, n), x); + n -= it->size(); + } + + auto view = c | std::views::join; + + for ([[maybe_unused]] auto _ : st) { + benchmark::DoNotOptimize(c); + benchmark::DoNotOptimize(y); + auto result = find(view.begin(), view.end(), y); + benchmark::DoNotOptimize(result); + } + }) + ->Arg(8) + ->Arg(50) // non power-of-two + ->Arg(1024) + ->Arg(8192) + ->Arg(1 << 15); + }; + register_nested_container_benchmarks(bm, "process all"); + } + // Benchmark {std,ranges}::{find,find_if,find_if_not}(vector) where we process the whole sequence { auto bm = [](std::string name, auto find) { diff --git a/libcxx/test/benchmarks/containers/string.bench.cpp b/libcxx/test/benchmarks/containers/string.bench.cpp index 2484ec8fd955f..98216d22d0144 100644 --- a/libcxx/test/benchmarks/containers/string.bench.cpp +++ b/libcxx/test/benchmarks/containers/string.bench.cpp @@ -541,10 +541,7 @@ struct StringRead { static bool skip() { // Huge does not give us anything that Large doesn't have. Skip it. - if (Length() == ::Length::Huge) { - return true; - } - return false; + return Length() == ::Length::Huge; } std::string name() const { return "BM_StringRead" + Temperature::name() + Depth::name() + Length::name(); } @@ -585,14 +582,6 @@ void sanityCheckGeneratedStrings() { } } -// Some small codegen thunks to easily see generated code. 
-bool StringEqString(const std::string& a, const std::string& b) { return a == b; } -bool StringEqCStr(const std::string& a, const char* b) { return a == b; } -bool CStrEqString(const char* a, const std::string& b) { return a == b; } -bool StringEqCStrLiteralEmpty(const std::string& a) { return a == ""; } -bool StringEqCStrLiteralSmall(const std::string& a) { return a == SmallStringLiteral; } -bool StringEqCStrLiteralLarge(const std::string& a) { return a == LargeStringLiteral; } - int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); if (benchmark::ReportUnrecognizedArguments(argc, argv)) @@ -615,16 +604,4 @@ int main(int argc, char** argv) { makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); benchmark::RunSpecifiedBenchmarks(); - - if (argc < 0) { - // ODR-use the functions to force them being generated in the binary. - auto functions = std::make_tuple( - StringEqString, - StringEqCStr, - CStrEqString, - StringEqCStrLiteralEmpty, - StringEqCStrLiteralSmall, - StringEqCStrLiteralLarge); - printf("%p", &functions); - } } diff --git a/libcxx/test/libcxx-03/algorithms/half_positive.pass.cpp b/libcxx/test/libcxx-03/algorithms/half_positive.pass.cpp index 292fcf356554b..ad0cac2a1bd65 100644 --- a/libcxx/test/libcxx-03/algorithms/half_positive.pass.cpp +++ b/libcxx/test/libcxx-03/algorithms/half_positive.pass.cpp @@ -41,17 +41,5 @@ int main(int, char**) #endif // !defined(TEST_HAS_NO_INT128) } -#if TEST_STD_VER >= 11 - { - static_assert(test(), ""); - static_assert(test(), ""); - static_assert(test(), ""); - static_assert(test(), ""); -#if !defined(TEST_HAS_NO_INT128) - static_assert(test<__int128_t>(), ""); -#endif // !defined(TEST_HAS_NO_INT128) - } -#endif // TEST_STD_VER >= 11 - return 0; } diff --git a/libcxx/test/libcxx-03/algorithms/robust_against_copying_comparators.pass.cpp b/libcxx/test/libcxx-03/algorithms/robust_against_copying_comparators.pass.cpp index 256251686bb3e..2e3fc6db45c93 100644 --- 
a/libcxx/test/libcxx-03/algorithms/robust_against_copying_comparators.pass.cpp +++ b/libcxx/test/libcxx-03/algorithms/robust_against_copying_comparators.pass.cpp @@ -82,17 +82,6 @@ struct BinaryTransform { TEST_CONSTEXPR T operator()(T, T) const { return 0; } }; -#if TEST_STD_VER > 17 -template -struct ThreeWay { - int* copies_; - constexpr explicit ThreeWay(int* copies) : copies_(copies) {} - constexpr ThreeWay(const ThreeWay& rhs) : copies_(rhs.copies_) { *copies_ += 1; } - constexpr ThreeWay& operator=(const ThreeWay&) = default; - constexpr std::strong_ordering operator()(T, T) const { return std::strong_ordering::equal; } -}; -#endif - template TEST_CONSTEXPR_CXX20 bool all_the_algorithms() { T a[10] = {}; @@ -109,28 +98,14 @@ TEST_CONSTEXPR_CXX20 bool all_the_algorithms() { int copies = 0; (void)std::adjacent_find(first, last, Equal(&copies)); assert(copies == 0); -#if TEST_STD_VER >= 11 - (void)std::all_of(first, last, UnaryTrue(&copies)); - assert(copies == 0); - (void)std::any_of(first, last, UnaryTrue(&copies)); - assert(copies == 0); -#endif (void)std::binary_search(first, last, value, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER > 17 - (void)std::clamp(value, value, value, Less(&copies)); - assert(copies == 0); -#endif (void)std::count_if(first, last, UnaryTrue(&copies)); assert(copies == 0); (void)std::copy_if(first, last, first2, UnaryTrue(&copies)); assert(copies == 0); (void)std::equal(first, last, first2, Equal(&copies)); assert(copies == 0); -#if TEST_STD_VER > 11 - (void)std::equal(first, last, first2, last2, Equal(&copies)); - assert(copies == 0); -#endif (void)std::equal_range(first, last, value, Less(&copies)); assert(copies == 0); (void)std::find_end(first, last, first2, mid2, Equal(&copies)); @@ -144,10 +119,6 @@ TEST_CONSTEXPR_CXX20 bool all_the_algorithms() { (void)std::for_each(first, last, UnaryVoid(&copies)); assert(copies == 1); copies = 0; -#if TEST_STD_VER > 14 - (void)std::for_each_n(first, count, UnaryVoid(&copies)); - 
assert(copies == 0); -#endif (void)std::generate(first, last, NullaryValue(&copies)); assert(copies == 0); (void)std::generate_n(first, count, NullaryValue(&copies)); @@ -162,10 +133,6 @@ TEST_CONSTEXPR_CXX20 bool all_the_algorithms() { assert(copies == 0); (void)std::is_permutation(first, last, first2, Equal(&copies)); assert(copies == 0); -#if TEST_STD_VER > 11 - (void)std::is_permutation(first, last, first2, last2, Equal(&copies)); - assert(copies == 0); -#endif (void)std::is_sorted(first, last, Less(&copies)); assert(copies == 0); (void)std::is_sorted_until(first, last, Less(&copies)); @@ -176,52 +143,28 @@ TEST_CONSTEXPR_CXX20 bool all_the_algorithms() { } (void)std::lexicographical_compare(first, last, first2, last2, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER > 17 - (void)std::lexicographical_compare_three_way(first, last, first2, last2, ThreeWay(&copies)); - assert(copies == 0); -#endif (void)std::lower_bound(first, last, value, Less(&copies)); assert(copies == 0); (void)std::make_heap(first, last, Less(&copies)); assert(copies == 0); (void)std::max(value, value, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER >= 11 - (void)std::max({value, value}, Less(&copies)); - assert(copies == 0); -#endif (void)std::max_element(first, last, Less(&copies)); assert(copies == 0); (void)std::merge(first, mid, mid, last, first2, Less(&copies)); assert(copies == 0); (void)std::min(value, value, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER >= 11 - (void)std::min({value, value}, Less(&copies)); - assert(copies == 0); -#endif (void)std::min_element(first, last, Less(&copies)); assert(copies == 0); (void)std::minmax(value, value, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER >= 11 - (void)std::minmax({value, value}, Less(&copies)); - assert(copies == 0); -#endif (void)std::minmax_element(first, last, Less(&copies)); assert(copies == 0); (void)std::mismatch(first, last, first2, Equal(&copies)); assert(copies == 0); -#if TEST_STD_VER > 11 - 
(void)std::mismatch(first, last, first2, last2, Equal(&copies)); - assert(copies == 0); -#endif (void)std::next_permutation(first, last, Less(&copies)); assert(copies == 0); -#if TEST_STD_VER >= 11 - (void)std::none_of(first, last, UnaryTrue(&copies)); - assert(copies == 0); -#endif (void)std::nth_element(first, mid, last, Less(&copies)); assert(copies == 0); (void)std::partial_sort(first, mid, last, Less(&copies)); @@ -299,14 +242,6 @@ bool test_segmented_iterator() { assert(copies == 1); copies = 0; -#if TEST_STD_VER >= 20 - std::vector> vecs(3, std::vector(10)); - auto v = std::views::join(vecs); - (void)std::for_each(v.begin(), v.end(), UnaryVoid(&copies)); - assert(copies == 1); - copies = 0; -#endif - return true; } @@ -314,10 +249,6 @@ int main(int, char**) { all_the_algorithms(); all_the_algorithms(); assert(test_segmented_iterator()); -#if TEST_STD_VER > 17 - static_assert(all_the_algorithms()); - static_assert(all_the_algorithms()); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/algorithms/robust_against_cpp20_hostile_iterators.compile.pass.cpp b/libcxx/test/libcxx-03/algorithms/robust_against_cpp20_hostile_iterators.compile.pass.cpp index 03fef57ee259a..009a234030198 100644 --- a/libcxx/test/libcxx-03/algorithms/robust_against_cpp20_hostile_iterators.compile.pass.cpp +++ b/libcxx/test/libcxx-03/algorithms/robust_against_cpp20_hostile_iterators.compile.pass.cpp @@ -99,10 +99,6 @@ void test() { (void) std::equal_range(it, it, 0, pred); (void) std::equal(it, it, it); (void) std::equal(it, it, it, pred); -#if TEST_STD_VER > 11 - (void) std::equal(it, it, it, it); - (void) std::equal(it, it, it, it, pred); -#endif (void) std::fill_n(it, 0, 0); (void) std::fill(it, it, 0); (void) std::find_end(it, it, it, it); @@ -112,9 +108,6 @@ void test() { (void) std::find_if_not(it, it, pred); (void) std::find_if(it, it, pred); (void) std::find(it, it, 0); -#if TEST_STD_VER > 14 - (void) std::for_each_n(it, 0, pred); -#endif (void) std::for_each(it, it, pred); 
(void) std::generate_n(it, 0, pred); (void) std::generate(it, it, pred); @@ -129,20 +122,12 @@ void test() { (void) std::is_partitioned(it, it, pred); (void) std::is_permutation(it, it, it); (void) std::is_permutation(it, it, it, pred); -#if TEST_STD_VER > 11 - (void) std::is_permutation(it, it, it, it); - (void) std::is_permutation(it, it, it, it, pred); -#endif (void) std::is_sorted_until(it, it); (void) std::is_sorted_until(it, it, pred); (void) std::is_sorted(it, it); (void) std::is_sorted(it, it, pred); (void) std::lexicographical_compare(it, it, it, it); (void) std::lexicographical_compare(it, it, it, it, pred); -#if TEST_STD_VER > 17 - (void)std::lexicographical_compare_three_way(it, it, it, it); - (void)std::lexicographical_compare_three_way(it, it, it, it, std::compare_three_way()); -#endif (void) std::lower_bound(it, it, 0); (void) std::lower_bound(it, it, 0, pred); (void) std::make_heap(it, it); @@ -189,14 +174,8 @@ void test() { (void) std::reverse(it, it); (void) std::rotate_copy(it, it, it, it); (void) std::rotate(it, it, it); -#if TEST_STD_VER > 14 - (void) std::sample(it, it, it, 0, rng); -#endif (void) std::search(it, it, it, it); (void) std::search(it, it, it, it, pred); -#if TEST_STD_VER > 14 - (void) std::search(it, it, std::default_searcher>(it, it)); -#endif (void) std::set_difference(it, it, it, it, it); (void) std::set_difference(it, it, it, it, it, pred); (void) std::set_intersection(it, it, it, it, it); @@ -205,10 +184,6 @@ void test() { (void) std::set_symmetric_difference(it, it, it, it, it, pred); (void) std::set_union(it, it, it, it, it); (void) std::set_union(it, it, it, it, it, pred); -#if TEST_STD_VER > 17 - (void) std::shift_left(it, it, 0); - (void) std::shift_right(it, it, 0); -#endif (void) std::shuffle(it, it, rng); (void) std::sort_heap(it, it); (void) std::sort_heap(it, it, pred); diff --git a/libcxx/test/libcxx-03/containers/sequences/vector/asan.pass.cpp b/libcxx/test/libcxx-03/containers/sequences/vector/asan.pass.cpp 
index 03d2b3e6ce9b9..72875a52246c4 100644 --- a/libcxx/test/libcxx-03/containers/sequences/vector/asan.pass.cpp +++ b/libcxx/test/libcxx-03/containers/sequences/vector/asan.pass.cpp @@ -26,29 +26,6 @@ extern "C" void __sanitizer_set_death_callback(void (*callback)(void)); void do_exit() { exit(0); } int main(int, char**) { -#if TEST_STD_VER >= 11 - { - typedef int T; - typedef cpp17_input_iterator MyInputIter; - std::vector> v; - v.reserve(1); - int i[] = {42}; - v.insert(v.begin(), MyInputIter(i), MyInputIter(i + 1)); - assert(v[0] == 42); - assert(is_contiguous_container_asan_correct(v)); - } - { - typedef char T; - typedef cpp17_input_iterator MyInputIter; - std::vector> v; - v.reserve(1); - char i[] = {'a', 'b'}; - v.insert(v.begin(), MyInputIter(i), MyInputIter(i + 2)); - assert(v[0] == 'a'); - assert(v[1] == 'b'); - assert(is_contiguous_container_asan_correct(v)); - } -#endif // TEST_STD_VER >= 11 { typedef cpp17_input_iterator MyInputIter; // Sould not trigger ASan. diff --git a/libcxx/test/libcxx-03/containers/sequences/vector/asan_throw.pass.cpp b/libcxx/test/libcxx-03/containers/sequences/vector/asan_throw.pass.cpp index dcfa8029cfc0d..c18242af4ed3c 100644 --- a/libcxx/test/libcxx-03/containers/sequences/vector/asan_throw.pass.cpp +++ b/libcxx/test/libcxx-03/containers/sequences/vector/asan_throw.pass.cpp @@ -68,23 +68,6 @@ void test_push_back() { assert(is_contiguous_container_asan_correct(v)); } -void test_emplace_back() { -#if TEST_STD_VER >= 11 - std::vector v; - v.reserve(2); - v.push_back(X(2)); - assert(v.size() == 1); - try { - v.emplace_back(42); - assert(0); - } catch (int e) { - assert(v.size() == 1); - } - assert(v.size() == 1); - assert(is_contiguous_container_asan_correct(v)); -#endif -} - void test_insert_range() { std::vector v; v.reserve(4); @@ -119,24 +102,6 @@ void test_insert() { assert(is_contiguous_container_asan_correct(v)); } -void test_emplace() { -#if TEST_STD_VER >= 11 - std::vector v; - v.reserve(3); - v.insert(v.end(), X(1)); 
- v.insert(v.begin(), X(2)); - assert(v.size() == 2); - try { - v.emplace(v.end(), 42); - assert(0); - } catch (int e) { - assert(v.size() == 2); - } - assert(v.size() == 2); - assert(is_contiguous_container_asan_correct(v)); -#endif -} - void test_insert_range2() { std::vector v; v.reserve(4); @@ -219,10 +184,8 @@ void test_resize_param() { int main(int, char**) { test_push_back(); - test_emplace_back(); test_insert_range(); test_insert(); - test_emplace(); test_insert_range2(); test_insert_n(); test_insert_n2(); diff --git a/libcxx/test/libcxx-03/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp b/libcxx/test/libcxx-03/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp index 9a37cf8af8e69..4e1cda55c991d 100644 --- a/libcxx/test/libcxx-03/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp +++ b/libcxx/test/libcxx-03/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp @@ -38,24 +38,6 @@ struct A { int move_only_constructed = 0; -#if TEST_STD_VER >= 11 -class move_only { - move_only(const move_only&) = delete; - move_only& operator=(const move_only&) = delete; - -public: - move_only(move_only&&) { ++move_only_constructed; } - move_only& operator=(move_only&&) { return *this; } - - move_only() { ++move_only_constructed; } - ~move_only() { --move_only_constructed; } - -public: - int data; // unused other than to make sizeof(move_only) == sizeof(int). 
- // but public to suppress "-Wunused-private-field" -}; -#endif // TEST_STD_VER >= 11 - int main(int, char**) { globalMemCounter.reset(); { @@ -107,41 +89,6 @@ int main(int, char**) { assert(globalMemCounter.checkOutstandingNewEq(0)); assert(A_constructed == 0); } -#if TEST_STD_VER >= 11 - { - std::allocator a; - assert(globalMemCounter.checkOutstandingNewEq(0)); - assert(move_only_constructed == 0); - - globalMemCounter.last_new_size = 0; - move_only* ap = a.allocate(3); - DoNotOptimize(ap); - assert(globalMemCounter.checkOutstandingNewEq(1)); - assert(globalMemCounter.checkLastNewSizeEq(3 * sizeof(int))); - assert(move_only_constructed == 0); - - a.construct(ap); - assert(globalMemCounter.checkOutstandingNewEq(1)); - assert(move_only_constructed == 1); - - a.destroy(ap); - assert(globalMemCounter.checkOutstandingNewEq(1)); - assert(move_only_constructed == 0); - - a.construct(ap, move_only()); - assert(globalMemCounter.checkOutstandingNewEq(1)); - assert(move_only_constructed == 1); - - a.destroy(ap); - assert(globalMemCounter.checkOutstandingNewEq(1)); - assert(move_only_constructed == 0); - - a.deallocate(ap, 3); - DoNotOptimize(ap); - assert(globalMemCounter.checkOutstandingNewEq(0)); - assert(move_only_constructed == 0); - } -#endif return 0; } diff --git a/libcxx/test/libcxx-03/input.output/string.streams/stringbuf/const_sso_buffer.pass.cpp b/libcxx/test/libcxx-03/input.output/string.streams/stringbuf/const_sso_buffer.pass.cpp index d6caa3389b8fa..b6cc6e506ff32 100644 --- a/libcxx/test/libcxx-03/input.output/string.streams/stringbuf/const_sso_buffer.pass.cpp +++ b/libcxx/test/libcxx-03/input.output/string.streams/stringbuf/const_sso_buffer.pass.cpp @@ -20,7 +20,6 @@ #include #include "test_macros.h" -#include "min_allocator.h" template struct test_buf : public std::basic_stringbuf { @@ -40,29 +39,6 @@ struct test_buf : public std::basic_stringbuf { explicit test_buf(std::ios_base::openmode which) : std::basic_stringbuf(which) {} explicit test_buf(const 
std::basic_string& s) : std::basic_stringbuf(s) {} -#if TEST_STD_VER >= 20 - explicit test_buf(const std::allocator& a) : std::basic_stringbuf(a) {} - test_buf(std::ios_base::openmode which, const std::allocator& a) : std::basic_stringbuf(which, a) {} - explicit test_buf(std::basic_string&& s) - : std::basic_stringbuf(std::forward>(s)) {} - - test_buf(const std::basic_string, min_allocator>& s, - const std::allocator& a) - : std::basic_stringbuf(s, a) {} - test_buf(const std::basic_string, min_allocator>& s, - std::ios_base::openmode which, - const std::allocator& a) - : std::basic_stringbuf(s, which, a) {} - test_buf(const std::basic_string, min_allocator>& s) - : std::basic_stringbuf(s) {} -#endif // TEST_STD_VER >= 20 - -#if TEST_STD_VER >= 26 - test_buf(std::basic_string_view s) : std::basic_stringbuf(s) {} - test_buf(std::basic_string_view s, const std::allocator& a) : std::basic_stringbuf(s, a) {} - test_buf(std::basic_string_view s, std::ios_base::openmode which, const std::allocator& a) - : std::basic_stringbuf(s, which, a) {} -#endif // TEST_STD_VER >= 26 }; template @@ -88,76 +64,6 @@ static void test() { assert(b.pptr() == b.pbase()); assert(b.epptr() == b.pbase() + size); // copy so uses size } -#if TEST_STD_VER >= 20 - { - test_buf b = test_buf(std::allocator()); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); - } - { - test_buf b = test_buf(std::ios_base::out, std::allocator()); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); - } - { - std::basic_string s; - s.reserve(1024); - std::size_t capacity = s.capacity(); - test_buf b = test_buf(std::move(s)); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() >= b.pbase() + capacity); // move so uses s.capacity() - } - { - std::basic_string, min_allocator> s; - s.reserve(1024); - test_buf b = test_buf(s, std::allocator()); - assert(b.pbase() != nullptr); - 
assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); // copy so uses size - } - { - std::basic_string, min_allocator> s; - s.reserve(1024); - test_buf b = test_buf(s, std::ios_base::out, std::allocator()); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); // copy so uses size - } - { - std::basic_string, min_allocator> s; - s.reserve(1024); - test_buf b = test_buf(s); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); // copy so uses size - } -#endif // TEST_STD_VER >= 20 -#if TEST_STD_VER >= 26 - { - std::basic_string_view s; - test_buf b = test_buf(s); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); - } - { - std::basic_string_view s; - test_buf b = test_buf(s, std::allocator()); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); - } - { - std::basic_string_view s; - test_buf b = test_buf(s, std::ios_base::out, std::allocator()); - assert(b.pbase() != nullptr); - assert(b.pptr() == b.pbase()); - assert(b.epptr() == b.pbase() + size); - } -#endif // TEST_STD_VER >= 26 } int main(int, char**) { diff --git a/libcxx/test/libcxx-03/iterators/bounded_iter/arithmetic.pass.cpp b/libcxx/test/libcxx-03/iterators/bounded_iter/arithmetic.pass.cpp index b4b6e7fa1940c..b5b6916ab1a51 100644 --- a/libcxx/test/libcxx-03/iterators/bounded_iter/arithmetic.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/bounded_iter/arithmetic.pass.cpp @@ -99,14 +99,6 @@ TEST_CONSTEXPR_CXX14 bool tests() { int main(int, char**) { tests(); -#if TEST_STD_VER > 11 - static_assert(tests(), ""); -#endif - -#if TEST_STD_VER > 17 - tests >(); - static_assert(tests >(), ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/iterators/bounded_iter/comparison.pass.cpp b/libcxx/test/libcxx-03/iterators/bounded_iter/comparison.pass.cpp index 
490bfed54a159..9c6ce283f53d9 100644 --- a/libcxx/test/libcxx-03/iterators/bounded_iter/comparison.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/bounded_iter/comparison.pass.cpp @@ -60,28 +60,11 @@ TEST_CONSTEXPR_CXX14 bool tests() { assert(iter1 >= iter1); } -#if TEST_STD_VER >= 20 - // P1614 - std::same_as decltype(auto) r1 = iter1 <=> iter2; - assert(r1 == std::strong_ordering::less); -#endif - return true; } int main(int, char**) { tests(); -#if TEST_STD_VER > 11 - static_assert(tests(), ""); -#endif - -#if TEST_STD_VER > 17 - tests>(); - static_assert(tests>()); - - tests>(); - static_assert(tests>()); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/iterators/bounded_iter/pointer_traits.pass.cpp b/libcxx/test/libcxx-03/iterators/bounded_iter/pointer_traits.pass.cpp index 671e716d21e26..fe21529aa0d5d 100644 --- a/libcxx/test/libcxx-03/iterators/bounded_iter/pointer_traits.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/bounded_iter/pointer_traits.pass.cpp @@ -37,10 +37,6 @@ TEST_CONSTEXPR_CXX14 bool tests() { std::__bounded_iter const iter2 = std::__make_bounded_iter(Iter(e), Iter(b), Iter(e)); assert(std::__to_address(iter1) == b); // in-bounds iterator assert(std::__to_address(iter2) == e); // out-of-bounds iterator -#if TEST_STD_VER > 17 - assert(std::to_address(iter1) == b); // in-bounds iterator - assert(std::to_address(iter2) == e); // out-of-bounds iterator -#endif } return true; @@ -48,14 +44,6 @@ TEST_CONSTEXPR_CXX14 bool tests() { int main(int, char**) { tests(); -#if TEST_STD_VER > 11 - static_assert(tests(), ""); -#endif - -#if TEST_STD_VER > 17 - tests >(); - static_assert(tests >(), ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/iterators/bounded_iter/types.compile.pass.cpp b/libcxx/test/libcxx-03/iterators/bounded_iter/types.compile.pass.cpp index 0d27dff0873b4..43c53a0378c63 100644 --- a/libcxx/test/libcxx-03/iterators/bounded_iter/types.compile.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/bounded_iter/types.compile.pass.cpp 
@@ -18,32 +18,9 @@ #include "test_macros.h" -#if TEST_STD_VER > 17 -struct Iterator { - struct value_type {}; - using difference_type = int; - struct pointer {}; - using reference = value_type&; - struct iterator_category : std::random_access_iterator_tag {}; - using iterator_concept = std::contiguous_iterator_tag; -}; - -using BoundedIter1 = std::__bounded_iter; -static_assert(std::is_same::value, ""); -static_assert(std::is_same::value, ""); -static_assert(std::is_same::value, ""); -static_assert(std::is_same::value, ""); -static_assert(std::is_same::value, ""); -static_assert(std::is_same::value, ""); -#endif - - using BoundedIter2 = std::__bounded_iter; static_assert(std::is_same::value, ""); static_assert(std::is_same::value, ""); static_assert(std::is_same::value, ""); static_assert(std::is_same::value, ""); static_assert(std::is_same::value, ""); -#if TEST_STD_VER > 17 -static_assert(std::is_same::value, ""); -#endif diff --git a/libcxx/test/libcxx-03/iterators/contiguous_iterators.conv.compile.pass.cpp b/libcxx/test/libcxx-03/iterators/contiguous_iterators.conv.compile.pass.cpp index 4d3690953070f..3e9707a38bcc9 100644 --- a/libcxx/test/libcxx-03/iterators/contiguous_iterators.conv.compile.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/contiguous_iterators.conv.compile.pass.cpp @@ -54,12 +54,3 @@ static_assert(!std::is_constructible::iterator, std::vector::const_iterator, std::vector::iterator>::value, ""); static_assert(!std::is_constructible::const_iterator, std::vector::const_iterator>::value, ""); - -#if TEST_STD_VER >= 20 -static_assert(!std::is_convertible_v::iterator, std::span::iterator>); -static_assert(!std::is_convertible_v::iterator, std::span::iterator>); -static_assert(!std::is_convertible_v::iterator, std::span::iterator>); -static_assert(!std::is_constructible_v::iterator, std::span::iterator>); -static_assert(!std::is_constructible_v::iterator, std::span::iterator>); -static_assert(!std::is_constructible_v::iterator, std::span::iterator>); 
-#endif diff --git a/libcxx/test/libcxx-03/iterators/contiguous_iterators.pass.cpp b/libcxx/test/libcxx-03/iterators/contiguous_iterators.pass.cpp index f00ca4e879403..56f12e01f0321 100644 --- a/libcxx/test/libcxx-03/iterators/contiguous_iterators.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/contiguous_iterators.pass.cpp @@ -27,14 +27,6 @@ #include "test_macros.h" #include "test_iterators.h" -#if TEST_STD_VER >= 17 -#include -#endif - -#if TEST_STD_VER >= 20 -#include -#endif - class T; // incomplete class my_input_iterator @@ -94,59 +86,12 @@ class my_random_access_iterator friend bool operator>=(const Self&, const Self&); }; -#if TEST_STD_VER >= 20 -class my_contiguous_iterator -{ - struct tag : std::contiguous_iterator_tag {}; - typedef my_contiguous_iterator Self; - int *state_; -public: - typedef tag iterator_category; - typedef int value_type; - typedef int difference_type; - typedef int* pointer; - typedef int& reference; - typedef int element_type; // enable to_address via pointer_traits - - my_contiguous_iterator(); - reference operator*() const; - pointer operator->() const; - reference operator[](difference_type) const; - - Self& operator++(); - Self operator++(int); - Self& operator--(); - Self operator--(int); - friend Self& operator+=(Self&, difference_type); - friend Self& operator-=(Self&, difference_type); - friend Self operator+(Self, difference_type); - friend Self operator+(difference_type, Self); - friend Self operator-(Self, difference_type); - friend difference_type operator-(Self, Self); - friend bool operator==(const Self&, const Self&); - friend bool operator!=(const Self&, const Self&); - friend bool operator<(const Self&, const Self&); - friend bool operator>(const Self&, const Self&); - friend bool operator<=(const Self&, const Self&); - friend bool operator>=(const Self&, const Self&); -}; -#endif - struct fake_deque_iterator : std::deque::iterator { using element_type = int; }; 
static_assert(std::__has_random_access_iterator_category::value, ""); static_assert(!std::__libcpp_is_contiguous_iterator::value, ""); -#if TEST_STD_VER >= 20 -struct fake2_deque_iterator : std::deque::iterator { - using iterator_concept = std::contiguous_iterator_tag; - using element_type = int; -}; -static_assert(std::__has_random_access_iterator_category::value, ""); -static_assert(std::__libcpp_is_contiguous_iterator::value, ""); -#endif - int main(int, char**) { // basic tests @@ -158,9 +103,6 @@ int main(int, char**) static_assert((!std::__libcpp_is_contiguous_iterator::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator::value), ""); -#if TEST_STD_VER >= 20 - static_assert(( std::__libcpp_is_contiguous_iterator::value), ""); -#endif // move_iterator changes value category, which makes it pretty sketchy to use in optimized codepaths static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); @@ -168,18 +110,12 @@ int main(int, char**) static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); -#if TEST_STD_VER >= 20 - static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); -#endif static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); -#if TEST_STD_VER >= 20 - static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); -#endif static_assert(( std::__libcpp_is_contiguous_iterator >::value), ""); static_assert(( std::__libcpp_is_contiguous_iterator >::value), ""); @@ -192,20 +128,12 @@ int main(int, char**) static_assert(( std::__libcpp_is_contiguous_iterator >::value), ""); 
static_assert(( std::__libcpp_is_contiguous_iterator > >::value), ""); -#if TEST_STD_VER >= 20 - static_assert(( std::__libcpp_is_contiguous_iterator >::value), ""); - static_assert(( std::__libcpp_is_contiguous_iterator > >::value), ""); -#endif - // iterators in the libc++ test suite static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); -#if TEST_STD_VER >= 20 - static_assert(( std::__libcpp_is_contiguous_iterator >::value), ""); -#endif static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator >::value), ""); @@ -244,22 +172,5 @@ int main(int, char**) static_assert((!std::__libcpp_is_contiguous_iterator::reverse_iterator> ::value), ""); static_assert((!std::__libcpp_is_contiguous_iterator::const_reverse_iterator> ::value), ""); -#if TEST_STD_VER >= 11 - static_assert(( std::__libcpp_is_contiguous_iterator::iterator> ::value), ""); - static_assert(( std::__libcpp_is_contiguous_iterator::const_iterator>::value), ""); -#endif - -#if TEST_STD_VER >= 17 - static_assert(( std::__libcpp_is_contiguous_iterator ::value), ""); - static_assert(( std::__libcpp_is_contiguous_iterator::value), ""); -#endif - -#if TEST_STD_VER >= 20 - static_assert(( std::__libcpp_is_contiguous_iterator::iterator> ::value), ""); - static_assert((!std::__libcpp_is_contiguous_iterator::reverse_iterator>::value), ""); - static_assert(( std::__libcpp_is_contiguous_iterator::iterator> ::value), ""); - static_assert((!std::__libcpp_is_contiguous_iterator::reverse_iterator>::value), ""); -#endif - return 0; } diff --git a/libcxx/test/libcxx-03/iterators/predef.iterators/insert.iterators/back.insert.iter.ops/get_container.pass.cpp 
b/libcxx/test/libcxx-03/iterators/predef.iterators/insert.iterators/back.insert.iter.ops/get_container.pass.cpp index 9f45848e9d3ff..f9b242ba5f14e 100644 --- a/libcxx/test/libcxx-03/iterators/predef.iterators/insert.iterators/back.insert.iter.ops/get_container.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/predef.iterators/insert.iterators/back.insert.iter.ops/get_container.pass.cpp @@ -17,7 +17,6 @@ #include "test_macros.h" #include "nasty_containers.h" -#include "test_constexpr_container.h" template TEST_CONSTEXPR_CXX20 bool test(C c) { @@ -29,9 +28,6 @@ TEST_CONSTEXPR_CXX20 bool test(C c) { int main(int, char**) { test(std::vector()); test(nasty_vector()); -#if TEST_STD_VER >= 20 - test(ConstexprFixedCapacityDeque()); - static_assert(test(ConstexprFixedCapacityDeque())); -#endif + return 0; } diff --git a/libcxx/test/libcxx-03/iterators/unwrap_iter.pass.cpp b/libcxx/test/libcxx-03/iterators/unwrap_iter.pass.cpp index 8ef2be2b01074..d93c8094e0dc9 100644 --- a/libcxx/test/libcxx-03/iterators/unwrap_iter.pass.cpp +++ b/libcxx/test/libcxx-03/iterators/unwrap_iter.pass.cpp @@ -51,9 +51,6 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 - static_assert(test()); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/language.support/support.dynamic/libcpp_deallocate.sh.cpp b/libcxx/test/libcxx-03/language.support/support.dynamic/libcpp_deallocate.sh.cpp index a9fe04fb0bcd5..e7824387ea9e5 100644 --- a/libcxx/test/libcxx-03/language.support/support.dynamic/libcpp_deallocate.sh.cpp +++ b/libcxx/test/libcxx-03/language.support/support.dynamic/libcpp_deallocate.sh.cpp @@ -200,13 +200,6 @@ void test_allocator_and_new_match() { stats.reset(); #elif defined(NO_SIZE) stats.reset(); -# if TEST_STD_VER >= 11 - { - int* x = DoNotOptimize(new int(42)); - delete x; - assert(stats.expect_plain()); - } -# endif stats.reset(); { AlignedType* a = DoNotOptimize(new AlignedType()); diff --git a/libcxx/test/libcxx-03/libcpp_alignof.pass.cpp 
b/libcxx/test/libcxx-03/libcpp_alignof.pass.cpp index 3ae7f7499d796..8d319d9ef2255 100644 --- a/libcxx/test/libcxx-03/libcpp_alignof.pass.cpp +++ b/libcxx/test/libcxx-03/libcpp_alignof.pass.cpp @@ -19,9 +19,6 @@ template void test() { static_assert(_LIBCPP_ALIGNOF(T) == std::alignment_of::value, ""); static_assert(_LIBCPP_ALIGNOF(T) == TEST_ALIGNOF(T), ""); -#if TEST_STD_VER >= 11 - static_assert(_LIBCPP_ALIGNOF(T) == alignof(T), ""); -#endif #ifdef TEST_COMPILER_CLANG static_assert(_LIBCPP_ALIGNOF(T) == _Alignof(T), ""); #endif diff --git a/libcxx/test/libcxx-03/memory/allocation_guard.pass.cpp b/libcxx/test/libcxx-03/memory/allocation_guard.pass.cpp index 5e71decdcabbd..ff6402e718e47 100644 --- a/libcxx/test/libcxx-03/memory/allocation_guard.pass.cpp +++ b/libcxx/test/libcxx-03/memory/allocation_guard.pass.cpp @@ -85,11 +85,7 @@ struct AssignableAllocator { TEST_CONSTEXPR_CXX20 void construct(pointer p, U&& val) { if (stats_ != nullptr) ++stats_->construct_count; -#if TEST_STD_VER > 17 - std::construct_at(std::to_address(p), std::forward(val)); -#else ::new (static_cast(p)) T(std::forward(val)); -#endif } TEST_CONSTEXPR_CXX14 void destroy(pointer p) { diff --git a/libcxx/test/libcxx-03/memory/swap_allocator.pass.cpp b/libcxx/test/libcxx-03/memory/swap_allocator.pass.cpp index 38dde7a1bf636..38fe778433171 100644 --- a/libcxx/test/libcxx-03/memory/swap_allocator.pass.cpp +++ b/libcxx/test/libcxx-03/memory/swap_allocator.pass.cpp @@ -58,24 +58,5 @@ int main(int, char**) { assert(a2.i == 42); } -#if TEST_STD_VER >= 11 - { - NoexceptSwapAlloc noexcept_alloc; - static_assert(noexcept(std::__swap_allocator(noexcept_alloc, noexcept_alloc)), ""); - } - -#if TEST_STD_VER > 11 - { // From C++14, `__swap_allocator` is unconditionally noexcept. 
- ThrowingSwapAlloc throwing_alloc; - static_assert(noexcept(std::__swap_allocator(throwing_alloc, throwing_alloc)), ""); - } -#else - { // Until C++14, `__swap_allocator` is only noexcept if the underlying `swap` function is `noexcept`. - ThrowingSwapAlloc throwing_alloc; - static_assert(!noexcept(std::__swap_allocator(throwing_alloc, throwing_alloc)), ""); - } -#endif // TEST_STD_VER > 11 -#endif // TEST_STD_VER >= 11 - return 0; } diff --git a/libcxx/test/libcxx-03/numerics/bit.ops.pass.cpp b/libcxx/test/libcxx-03/numerics/bit.ops.pass.cpp index 0b82f352ffe3d..1f208116739da 100644 --- a/libcxx/test/libcxx-03/numerics/bit.ops.pass.cpp +++ b/libcxx/test/libcxx-03/numerics/bit.ops.pass.cpp @@ -29,9 +29,6 @@ TEST_CONSTEXPR_CXX14 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 11 - static_assert(test(), ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/strings/basic.string/string.capacity/max_size.pass.cpp b/libcxx/test/libcxx-03/strings/basic.string/string.capacity/max_size.pass.cpp index 6bfcb5d4bfcd8..73825ef4845f2 100644 --- a/libcxx/test/libcxx-03/strings/basic.string/string.capacity/max_size.pass.cpp +++ b/libcxx/test/libcxx-03/strings/basic.string/string.capacity/max_size.pass.cpp @@ -112,9 +112,6 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 - static_assert(test()); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/strings/basic.string/string.cons/copy_shrunk_long.pass.cpp b/libcxx/test/libcxx-03/strings/basic.string/string.cons/copy_shrunk_long.pass.cpp index d4a0b318f36d7..309c84bdffb09 100644 --- a/libcxx/test/libcxx-03/strings/basic.string/string.cons/copy_shrunk_long.pass.cpp +++ b/libcxx/test/libcxx-03/strings/basic.string/string.cons/copy_shrunk_long.pass.cpp @@ -15,7 +15,6 @@ #include "test_macros.h" #include "test_allocator.h" -#include "min_allocator.h" template TEST_CONSTEXPR_CXX20 bool test() { @@ -33,13 +32,6 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) 
{ test, test_allocator > >(); -#if TEST_STD_VER >= 11 - test, min_allocator>>(); -#endif -#if TEST_STD_VER > 17 - static_assert(test, test_allocator>>()); - static_assert(test, min_allocator>>()); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/strings/c.strings/constexpr_memmove.pass.cpp b/libcxx/test/libcxx-03/strings/c.strings/constexpr_memmove.pass.cpp index d2ca5a2658524..6c7d194f512bc 100644 --- a/libcxx/test/libcxx-03/strings/c.strings/constexpr_memmove.pass.cpp +++ b/libcxx/test/libcxx-03/strings/c.strings/constexpr_memmove.pass.cpp @@ -148,8 +148,6 @@ TEST_CONSTEXPR_CXX14 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER >= 14 - static_assert(test(), ""); -#endif + return 0; } diff --git a/libcxx/test/libcxx-03/type_traits/is_trivially_relocatable.compile.pass.cpp b/libcxx/test/libcxx-03/type_traits/is_trivially_relocatable.compile.pass.cpp index 09049f38497aa..b3880edb3015e 100644 --- a/libcxx/test/libcxx-03/type_traits/is_trivially_relocatable.compile.pass.cpp +++ b/libcxx/test/libcxx-03/type_traits/is_trivially_relocatable.compile.pass.cpp @@ -128,31 +128,11 @@ static_assert(!std::__libcpp_is_trivially_relocatable::value, ""); #endif -// expected -#if TEST_STD_VER >= 23 -static_assert(std::__libcpp_is_trivially_relocatable >::value); -static_assert(std::__libcpp_is_trivially_relocatable, int>>::value); -static_assert(std::__libcpp_is_trivially_relocatable>>::value); -static_assert(std::__libcpp_is_trivially_relocatable, std::unique_ptr>>::value); - -static_assert(!std::__libcpp_is_trivially_relocatable>::value); -static_assert(!std::__libcpp_is_trivially_relocatable>::value); -static_assert( - !std::__libcpp_is_trivially_relocatable>::value); -#endif - // locale #ifndef TEST_HAS_NO_LOCALIZATION static_assert(std::__libcpp_is_trivially_relocatable::value, ""); #endif -// optional -#if TEST_STD_VER >= 17 -static_assert(std::__libcpp_is_trivially_relocatable>::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable>::value, 
""); -static_assert(std::__libcpp_is_trivially_relocatable>>::value, ""); -#endif // TEST_STD_VER >= 17 - // pair static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); @@ -165,23 +145,6 @@ static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); -// tuple -#if TEST_STD_VER >= 11 -static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); - -static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(std::__libcpp_is_trivially_relocatable > >::value, ""); - -static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, - ""); -static_assert(std::__libcpp_is_trivially_relocatable, std::unique_ptr > >::value, - ""); -#endif // TEST_STD_VER >= 11 - // unique_ptr struct NotTriviallyRelocatableDeleter { NotTriviallyRelocatableDeleter(const NotTriviallyRelocatableDeleter&); @@ -215,21 +178,6 @@ static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -// variant -#if TEST_STD_VER >= 17 -static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(std::__libcpp_is_trivially_relocatable > >::value, ""); - -static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, ""); -static_assert(!std::__libcpp_is_trivially_relocatable >::value, - ""); -static_assert(std::__libcpp_is_trivially_relocatable, std::unique_ptr > >::value, - ""); -#endif // TEST_STD_VER >= 17 - // vector static_assert(std::__libcpp_is_trivially_relocatable >::value, 
""); static_assert(std::__libcpp_is_trivially_relocatable >::value, ""); diff --git a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_1_2_3.pass.cpp b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_1_2_3.pass.cpp index 48460d1488fd7..a883112100e9d 100644 --- a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_1_2_3.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_1_2_3.pass.cpp @@ -90,43 +90,6 @@ struct MemFun03 { }; -#if TEST_STD_VER >= 11 - -//============================================================================== -// MemFun11 - C++11 reference qualified test member functions. -struct MemFun11 { - typedef void*& R; - typedef MemFun11 C; -#define F(...) \ - R f(__VA_ARGS__) & { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) const & { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) volatile & { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) const volatile & { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) && { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) const && { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) volatile && { return MethodID::setUncheckedCall(); } \ - R f(__VA_ARGS__) const volatile && { return MethodID::setUncheckedCall(); } -# - F() - F(...) - F(ArgType&&) - F(ArgType&&, ...) - F(ArgType&&, ArgType&&) - F(ArgType&&, ArgType&&, ...) - F(ArgType&&, ArgType&&, ArgType&&) - F(ArgType&&, ArgType&&, ArgType&&, ...) -#undef F -public: - MemFun11() {} -private: - MemFun11(MemFun11 const&); - MemFun11& operator=(MemFun11 const&); -}; - -#endif // TEST_STD_VER >= 11 - - - //============================================================================== // TestCase - A test case for a single member function. // ClassType - The type of the class being tested. 
@@ -167,10 +130,6 @@ struct TestCaseImp { runTestDispatchIf(NotRValue, tag, dref); runTestDispatchIf(NotRValue, tag, obj_ptr); runTestDispatchIf(NotRValue, tag, der_ptr); -#if TEST_STD_VER >= 11 - runTestDispatchIf(NotRValue, tag, rref); - runTestDispatchIf(NotRValue, tag, drref); -#endif } template @@ -242,61 +201,6 @@ struct TestCaseImp { template struct TestCase : public TestCaseImp {}; -#if TEST_STD_VER >= 11 -template -struct TestCase11 : public TestCaseImp {}; - -template -struct ReferenceWrapper { - using type = Type; - Type* ptr; - - static void fun(Type&) noexcept; - static void fun(Type&&) = delete; - - template ::value>::type> - constexpr ReferenceWrapper(Type2&& t) noexcept : ptr(&t) {} - - constexpr Type& get() const noexcept { return *ptr; } - constexpr operator Type&() const noexcept { return *ptr; } - - template - constexpr std::__invoke_result_t operator()(_ArgTypes&&... __args) const { - return std::__invoke(get(), std::forward<_ArgTypes>(__args)...); - } -}; - -template -struct DerivedFromRefWrap : public ReferenceWrapper { - constexpr DerivedFromRefWrap(Tp& tp) : ReferenceWrapper(tp) {} -}; - -TEST_CONSTEXPR_CXX14 bool test_derived_from_ref_wrap() { - int x = 42; - ReferenceWrapper r(x); - DerivedFromRefWrap d(x); - auto get_fn = &ReferenceWrapper::get; - auto& ret = std::__invoke(get_fn, r); - assert(&ret == &x); - auto& ret2 = std::__invoke(get_fn, d); - assert(&ret2 == &x); - - return true; -} - -TEST_CONSTEXPR_CXX20 bool test_reference_wrapper_reference_wrapper() { - int x = 42; - auto get_fn = &std::reference_wrapper::get; - std::reference_wrapper r(x); - std::reference_wrapper> r2(r); - auto& ret3 = std::__invoke(get_fn, r2); - assert(&ret3 == &x); - - return true; -} -#endif - int main(int, char**) { typedef void*& R; typedef ArgType A; @@ -333,73 +237,5 @@ int main(int, char**) { TestCase::run(); TestCase::run(); -#if TEST_STD_VER >= 11 - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - 
TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - TestCase11::run(); - - test_derived_from_ref_wrap(); - test_reference_wrapper_reference_wrapper(); -#if TEST_STD_VER > 11 - static_assert(test_derived_from_ref_wrap(), ""); -#endif -#if TEST_STD_VER > 17 - static_assert(test_reference_wrapper_reference_wrapper(), ""); -#endif -#endif // TEST_STD_VER >= 11 - return 0; } diff --git a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_4_5_6.pass.cpp b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_4_5_6.pass.cpp index 0df59290824bb..3a8d141a7d3d5 100644 --- a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_4_5_6.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_4_5_6.pass.cpp @@ -85,10 +85,6 @@ struct TestCase { runTestDispatch(M, dref2, &dref2.object.object); runTestPropCVDispatch(M, obj_ptr, &obj_ptr->object); runTestPropCVDispatch(M, der_ptr, &der_ptr->object); -#if 
TEST_STD_VER >= 11 - runTestPropCVDispatch(M, rref, &(rref.get().object)); - runTestPropCVDispatch(M, drref, &(drref.get().object)); -#endif runTestNoPropDispatch(M, dref, &dref.object.object); } { @@ -100,10 +96,6 @@ struct TestCase { runTestDispatch(M, dref2, &dref2.object.object); runTestPropCVDispatch(M, obj_ptr, &obj_ptr->object); runTestPropCVDispatch(M, der_ptr, &der_ptr->object); -#if TEST_STD_VER >= 11 - runTestPropCVDispatch(M, rref, &(rref.get().object)); - runTestPropCVDispatch(M, drref, &(drref.get().object)); -#endif runTestNoPropDispatch(M, dref, &dref.object.object); } { @@ -115,10 +107,6 @@ struct TestCase { runTestDispatch(M, dref2, &dref2.object.object); runTestPropCVDispatch(M, obj_ptr, &obj_ptr->object); runTestPropCVDispatch(M, der_ptr, &der_ptr->object); -#if TEST_STD_VER >= 11 - runTestPropCVDispatch(M, rref, &(rref.get().object)); - runTestPropCVDispatch(M, drref, &(drref.get().object)); -#endif runTestNoPropDispatch(M, dref, &dref.object.object); } { @@ -130,10 +118,6 @@ struct TestCase { runTestDispatch(M, dref2, &dref2.object.object); runTestPropCVDispatch(M, obj_ptr, &obj_ptr->object); runTestPropCVDispatch(M, der_ptr, &der_ptr->object); -#if TEST_STD_VER >= 11 - runTestPropCVDispatch(M, rref, &(rref.get().object)); - runTestPropCVDispatch(M, drref, &(drref.get().object)); -#endif runTestNoPropDispatch(M, dref, &dref.object.object); } } @@ -144,12 +128,6 @@ struct TestCase { runTest (M, C_(obj), expect); runTest (M, C_(obj), expect); runTest(M, C_(obj), expect); -#if TEST_STD_VER >= 11 - runTest (M, C_(obj), expect); - runTest (M, C_(obj), expect); - runTest (M, C_(obj), expect); - runTest(M, C_(obj), expect); -#endif } template @@ -166,12 +144,6 @@ struct TestCase { runTest(M, C_(obj), expect); runTest(M, C_(obj), expect); runTest(M, C_(obj), expect); -#if TEST_STD_VER >= 11 - runTest(M, C_(obj), expect); - runTest(M, C_(obj), expect); - runTest(M, C_(obj), expect); - runTest(M, C_(obj), expect); -#endif } template @@ -184,11 +156,7 @@ 
struct TestCase { } template -#if TEST_STD_VER >= 11 - void runTest(Fn M, T&& obj, ObjectType* expect) { -#else void runTest(Fn M, T& obj, ObjectType* expect ) { -#endif { static_assert((std::is_same< decltype(std::__invoke(M, std::forward(obj))), Expect diff --git a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_7.pass.cpp b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_7.pass.cpp index fb789fa0a86cc..ba1c655da002f 100644 --- a/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_7.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/function.objects/func.require/bullet_7.pass.cpp @@ -115,49 +115,6 @@ struct Functor03 { }; -#if TEST_STD_VER >= 11 - -//============================================================================== -// freeFunction11 - A C++11 free function. -template -void*& freeFunction11(Args&&...) { - return FunctionPtrID::setUncheckedCall(); -} - -template -void*& freeFunction11(Args&&...,...) { - return FunctionPtrID::setUncheckedCall(); -} - -//============================================================================== -// Functor11 - C++11 reference qualified test member functions. -struct Functor11 { - typedef void*& R; - typedef Functor11 C; - -#define F(CV) \ - template \ - R operator()(Args&&...) CV { return MethodID::setUncheckedCall(); } -# - F(&) - F(const &) - F(volatile &) - F(const volatile &) - F(&&) - F(const &&) - F(volatile &&) - F(const volatile &&) -#undef F -public: - Functor11() {} -private: - Functor11(Functor11 const&); - Functor11& operator=(Functor11 const&); -}; - -#endif // TEST_STD_VER >= 11 - - //============================================================================== // TestCaseFunctorImp - A test case for an operator() class method. // ClassType - The type of the call object. 
@@ -207,19 +164,9 @@ struct TestCaseFreeFunction { //============================================================================== // runTest Helpers //============================================================================== -#if TEST_STD_VER >= 11 -template -void runFunctionTestCase11() { - TestCaseFreeFunction(); -} -#endif - template void runFunctionTestCase() { TestCaseFreeFunction(); -#if TEST_STD_VER >= 11 - runFunctionTestCase11(); -#endif } template @@ -232,14 +179,6 @@ void runFunctorTestCase() { TestCaseFunctorImp::run(); } -#if TEST_STD_VER >= 11 -// runTestCase - Run a test case for C++11 class functor types -template -void runFunctorTestCase11() { - TestCaseFunctorImp::run(); -} -#endif - // runTestCase - Run a test case for both function and functor types. template void runTestCase() { @@ -265,11 +204,6 @@ int main(int, char**) { runFunctionTestCase(); runFunctionTestCase(); -#if TEST_STD_VER >= 11 - runFunctionTestCase11(); - runFunctionTestCase11(); -#endif - runFunctorTestCase(); runFunctorTestCase(); runFunctorTestCase(); @@ -302,27 +236,5 @@ int main(int, char**) { runFunctorTestCase(); } -#if TEST_STD_VER >= 11 - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - { - typedef MoveCaster MC; - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - runFunctorTestCase11(); - } -#endif - return 0; } diff --git a/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke.pass.cpp b/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke.pass.cpp index e534553a87f04..bfcea06e944ad 100644 --- a/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke.pass.cpp +++ 
b/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke.pass.cpp @@ -23,24 +23,12 @@ struct Type { Array::type& f1(); Array::type& f2() const; -#if TEST_STD_VER >= 11 - Array::type& g1() &; - Array::type& g2() const &; - Array::type& g3() &&; - Array::type& g4() const &&; -#endif }; int main(int, char**) { static_assert(sizeof(std::__invoke(&Type::f1, std::declval())) == 1, ""); static_assert(sizeof(std::__invoke(&Type::f2, std::declval())) == 2, ""); -#if TEST_STD_VER >= 11 - static_assert(sizeof(std::__invoke(&Type::g1, std::declval())) == 1, ""); - static_assert(sizeof(std::__invoke(&Type::g2, std::declval())) == 2, ""); - static_assert(sizeof(std::__invoke(&Type::g3, std::declval())) == 3, ""); - static_assert(sizeof(std::__invoke(&Type::g4, std::declval())) == 4, ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke_helpers.h b/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke_helpers.h index f6f418b51c489..bebb9f4685e44 100644 --- a/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke_helpers.h +++ b/libcxx/test/libcxx-03/utilities/function.objects/func.require/invoke_helpers.h @@ -52,13 +52,7 @@ struct Caster { struct apply { typedef typename std::remove_reference::type RawType; typedef typename QualTag::template apply::type CVType; -#if TEST_STD_VER >= 11 - typedef typename std::conditional::type type; -#else typedef CVType& type; -#endif }; template @@ -165,21 +159,10 @@ struct DerefPropType { template explicit DerefPropType(Up const& val) : object(val) {} -#if TEST_STD_VER < 11 To& operator*() { return object; } To const& operator*() const { return object; } To volatile& operator*() volatile { return object; } To const volatile& operator*() const volatile { return object; } -#else - To& operator*() & { return object; } - To const& operator*() const & { return object; } - To volatile& operator*() volatile & { return object; } - To const volatile& 
operator*() const volatile & { return object; } - To&& operator*() && { return static_cast(object); } - To const&& operator*() const && { return static_cast(object); } - To volatile&& operator*() volatile && { return static_cast(object); } - To const volatile&& operator*() const volatile && { return static_cast(object); } -#endif }; //============================================================================== diff --git a/libcxx/test/libcxx-03/utilities/is_pointer_in_range.pass.cpp b/libcxx/test/libcxx-03/utilities/is_pointer_in_range.pass.cpp index 6c60147adfdf4..bb33ee47e1d5b 100644 --- a/libcxx/test/libcxx-03/utilities/is_pointer_in_range.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/is_pointer_in_range.pass.cpp @@ -18,20 +18,6 @@ TEST_CONSTEXPR_CXX14 void test_cv_quals() { assert(!std::__is_pointer_in_range(&i, &i, &i)); assert(std::__is_pointer_in_range(&i, &i + 1, &i)); assert(!std::__is_pointer_in_range(&i, &i + 1, &j)); - -#if TEST_STD_VER >= 20 - { - T* arr1 = new int[4]{1, 2, 3, 4}; - U* arr2 = new int[4]{5, 6, 7, 8}; - - assert(!std::__is_pointer_in_range(arr1, arr1 + 4, arr2)); - assert(std::__is_pointer_in_range(arr1, arr1 + 4, arr1 + 3)); - assert(!std::__is_pointer_in_range(arr1, arr1, arr1 + 3)); - - delete[] arr1; - delete[] arr2; - } -#endif } TEST_CONSTEXPR_CXX14 bool test() { @@ -48,9 +34,6 @@ TEST_CONSTEXPR_CXX14 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER >= 14 - static_assert(test(), ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/utilities/is_valid_range.pass.cpp b/libcxx/test/libcxx-03/utilities/is_valid_range.pass.cpp index 9ad1e89f98187..ef645d4f1222e 100644 --- a/libcxx/test/libcxx-03/utilities/is_valid_range.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/is_valid_range.pass.cpp @@ -39,14 +39,6 @@ TEST_CONSTEXPR_CXX14 void check_type() { assert(!std::__is_valid_range(static_cast(&arr[1]), static_cast(&arr[0]))); assert(!std::__is_valid_range(static_cast(&arr[2]), static_cast(&arr[0]))); } - -#if 
TEST_STD_VER >= 20 - { - T* arr = new int[4]{1, 2, 3, 4}; - assert(std::__is_valid_range(static_cast(arr), static_cast(arr + 4))); - delete[] arr; - } -#endif } TEST_CONSTEXPR_CXX14 bool test() { @@ -60,9 +52,6 @@ TEST_CONSTEXPR_CXX14 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER >= 14 - static_assert(test(), ""); -#endif return 0; } diff --git a/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address.pass.cpp b/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address.pass.cpp index 60ef98ae905d0..f6df5db40aaa7 100644 --- a/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address.pass.cpp @@ -152,8 +152,6 @@ TEST_CONSTEXPR_CXX14 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER >= 14 - static_assert(test(), ""); -#endif + return 0; } diff --git a/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address_std_iterators.pass.cpp b/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address_std_iterators.pass.cpp index 5eed12d19c072..3b7527d8d6523 100644 --- a/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address_std_iterators.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/memory/pointer.conversion/to_address_std_iterators.pass.cpp @@ -44,12 +44,6 @@ int main(int, char**) { test_container_iterators(std::array()); test_container_iterators(std::vector(3)); test_container_iterators(std::string("abc")); -#if TEST_STD_VER >= 17 - test_container_iterators(std::string_view("abc")); -#endif -#if TEST_STD_VER >= 20 - test_container_iterators(std::span("abc")); -#endif test_valarray_iterators(); return 0; diff --git a/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp b/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp index f39d1a5da41af..a079bbe9fcc98 100644 --- a/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp +++ 
b/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp @@ -33,10 +33,6 @@ static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); @@ -45,146 +41,42 @@ static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); 
-static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -static_assert((!std::__libcpp_is_referenceable::value), ""); -#endif // member functions with or without cv-qualifiers are referenceable static_assert((std::__libcpp_is_referenceable::value), ""); 
static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); 
static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif static_assert((std::__libcpp_is_referenceable::value), ""); static_assert((std::__libcpp_is_referenceable::value), ""); -#if TEST_STD_VER >= 11 -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -static_assert((std::__libcpp_is_referenceable::value), ""); -#endif diff --git a/libcxx/test/libcxx-03/utilities/no_destroy.pass.cpp b/libcxx/test/libcxx-03/utilities/no_destroy.pass.cpp index 561654d7f31fb..c79d20838d86f 100644 --- a/libcxx/test/libcxx-03/utilities/no_destroy.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/no_destroy.pass.cpp @@ -11,11 +11,6 @@ #include "test_macros.h" -#if TEST_STD_VER > 17 -// Test constexpr-constructibility. 
-constinit std::__no_destroy nd_int_const(std::__uninitialized_tag{}); -#endif - struct DestroyLast { ~DestroyLast() { assert(*ptr == 5); } diff --git a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.non_trivial_copy_move.pass.cpp b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.non_trivial_copy_move.pass.cpp index 1f5dae1232e37..d961884398920 100644 --- a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.non_trivial_copy_move.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.non_trivial_copy_move.pass.cpp @@ -31,49 +31,8 @@ template struct HasNonTrivialABI : std::integral_constant::value || (std::is_copy_constructible::value && !std::is_trivially_copy_constructible::value) -#if TEST_STD_VER >= 11 - || (std::is_move_constructible::value && !std::is_trivially_move_constructible::value) -#endif > {}; -#if TEST_STD_VER >= 11 -struct NonTrivialDtor { - NonTrivialDtor(NonTrivialDtor const&) = default; - ~NonTrivialDtor(); -}; -NonTrivialDtor::~NonTrivialDtor() {} -static_assert(HasNonTrivialABI::value, ""); - -struct NonTrivialCopy { - NonTrivialCopy(NonTrivialCopy const&); -}; -NonTrivialCopy::NonTrivialCopy(NonTrivialCopy const&) {} -static_assert(HasNonTrivialABI::value, ""); - -struct NonTrivialMove { - NonTrivialMove(NonTrivialMove const&) = default; - NonTrivialMove(NonTrivialMove&&); -}; -NonTrivialMove::NonTrivialMove(NonTrivialMove&&) {} -static_assert(HasNonTrivialABI::value, ""); - -struct DeletedCopy { - DeletedCopy(DeletedCopy const&) = delete; - DeletedCopy(DeletedCopy&&) = default; -}; -static_assert(!HasNonTrivialABI::value, ""); - -struct TrivialMove { - TrivialMove(TrivialMove &&) = default; -}; -static_assert(!HasNonTrivialABI::value, ""); - -struct Trivial { - Trivial(Trivial const&) = default; -}; -static_assert(!HasNonTrivialABI::value, ""); -#endif - void test_trivial() { @@ -82,62 +41,6 @@ void test_trivial() static_assert(std::is_copy_constructible

::value, ""); static_assert(HasNonTrivialABI

::value, ""); } -#if TEST_STD_VER >= 11 - { - typedef std::pair P; - static_assert(std::is_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_trivially_destructible

::value, ""); - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(HasNonTrivialABI

::value, ""); - } -#endif } void test_layout() { diff --git a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivial_copy_move.pass.cpp b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivial_copy_move.pass.cpp index 3ec60c08b8eab..1dafb10ce1f42 100644 --- a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivial_copy_move.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivial_copy_move.pass.cpp @@ -28,49 +28,8 @@ template struct HasTrivialABI : std::integral_constant::value && (!std::is_copy_constructible::value || std::is_trivially_copy_constructible::value) -#if TEST_STD_VER >= 11 - && (!std::is_move_constructible::value || std::is_trivially_move_constructible::value) -#endif > {}; -#if TEST_STD_VER >= 11 -struct NonTrivialDtor { - NonTrivialDtor(NonTrivialDtor const&) = default; - ~NonTrivialDtor(); -}; -NonTrivialDtor::~NonTrivialDtor() {} -static_assert(!HasTrivialABI::value, ""); - -struct NonTrivialCopy { - NonTrivialCopy(NonTrivialCopy const&); -}; -NonTrivialCopy::NonTrivialCopy(NonTrivialCopy const&) {} -static_assert(!HasTrivialABI::value, ""); - -struct NonTrivialMove { - NonTrivialMove(NonTrivialMove const&) = default; - NonTrivialMove(NonTrivialMove&&); -}; -NonTrivialMove::NonTrivialMove(NonTrivialMove&&) {} -static_assert(!HasTrivialABI::value, ""); - -struct DeletedCopy { - DeletedCopy(DeletedCopy const&) = delete; - DeletedCopy(DeletedCopy&&) = default; -}; -static_assert(HasTrivialABI::value, ""); - -struct TrivialMove { - TrivialMove(TrivialMove &&) = default; -}; -static_assert(HasTrivialABI::value, ""); - -struct Trivial { - Trivial(Trivial const&) = default; -}; -static_assert(HasTrivialABI::value, ""); -#endif - struct TrivialNoAssignment { int arr[4]; TrivialNoAssignment& operator=(const TrivialNoAssignment&) = delete; @@ -90,78 +49,14 @@ void test_trivial() static_assert(std::is_copy_constructible

::value, ""); static_assert(HasTrivialABI

::value, ""); } -#if TEST_STD_VER >= 11 - { - typedef std::pair P; - static_assert(std::is_move_constructible

::value, ""); - static_assert(HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_trivially_destructible

::value, ""); - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(!HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(!HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); - static_assert(!HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(std::is_trivially_move_constructible

::value, ""); - static_assert(HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(std::is_copy_constructible

::value, ""); - static_assert(std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(std::is_trivially_move_constructible

::value, ""); - static_assert(HasTrivialABI

::value, ""); - } - { - using P = std::pair; - static_assert(!std::is_copy_constructible

::value, ""); - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(std::is_move_constructible

::value, ""); - static_assert(std::is_trivially_move_constructible

::value, ""); - static_assert(HasTrivialABI

::value, ""); - } -#endif { using P = std::pair; static_assert(std::is_trivially_copy_constructible

::value, ""); static_assert(std::is_trivially_move_constructible

::value, ""); -#if TEST_STD_VER >= 11 // This is https://llvm.org/PR90605 - static_assert(!std::is_trivially_copy_assignable

::value, ""); - static_assert(!std::is_trivially_move_assignable

::value, ""); -#endif // TEST_STD_VER >= 11 static_assert(std::is_trivially_destructible

::value, ""); } { using P = std::pair; -#if TEST_STD_VER >= 11 - static_assert(!std::is_trivially_copy_constructible

::value, ""); - static_assert(!std::is_trivially_move_constructible

::value, ""); -#endif // TEST_STD_VER >= 11 static_assert(!std::is_trivially_copy_assignable

::value, ""); static_assert(!std::is_trivially_move_assignable

::value, ""); static_assert(std::is_trivially_destructible

::value, ""); diff --git a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivially_copyable.compile.pass.cpp b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivially_copyable.compile.pass.cpp index 1132b3e5def18..7bd1adca0d5e0 100644 --- a/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivially_copyable.compile.pass.cpp +++ b/libcxx/test/libcxx-03/utilities/utility/pairs/pairs.pair/abi.trivially_copyable.compile.pass.cpp @@ -52,13 +52,8 @@ static_assert(!std::is_trivially_copyable >::value, ""); static_assert(!std::is_trivially_copyable >::value, ""); static_assert(!std::is_trivially_copyable, int> >::value, ""); static_assert(!std::is_trivially_copyable >::value, ""); -#if TEST_STD_VER == 03 // Known ABI difference static_assert(!std::is_trivially_copyable >::value, ""); static_assert(!std::is_trivially_copyable >::value, ""); -#else -static_assert(std::is_trivially_copyable >::value, ""); -static_assert(std::is_trivially_copyable >::value, ""); -#endif static_assert(!std::is_trivially_copyable >::value, ""); static_assert(std::is_trivially_copy_constructible >::value, ""); diff --git a/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp b/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp index c473879d87b71..e32c0a96c1261 100644 --- a/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp +++ b/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp @@ -35,28 +35,28 @@ int main(int, char**) { // mismatch of static extent { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(0); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(0); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(0); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(0); }()), "extents access: index 
must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(9); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(9); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(9); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(9); }()), "extents access: index must be less than rank"); } // check that static_extent works in constant expression with assertions enabled diff --git 
a/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp b/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp new file mode 100644 index 0000000000000..71f53f8f1f737 --- /dev/null +++ b/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp @@ -0,0 +1,62 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// REQUIRES: std-at-least-c++23 + +// + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include + +void test() { + // mdspan<> + + std::array data; + std::mdspan> mdsp{data.data(), 2, 2}; + + mdsp[0, 1]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::array arr{0, 1}; + mdsp[arr]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::span sp{arr}; + mdsp[sp]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.extents(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.data_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.mapping(); // expected-warning {{ignoring return 
value of function declared with 'nodiscard' attribute}} + mdsp.accessor(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.is_always_unique(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_always_exhaustive(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_always_strided(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_unique(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_exhaustive(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_strided(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.stride(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // Helpers + + std::extents ex; + ex.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + std::dextents dex; + dex.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +} diff --git 
a/libcxx/test/libcxx/diagnostics/deque.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/deque.nodiscard.verify.cpp index e8dda09567613..a9adb1757b8ef 100644 --- a/libcxx/test/libcxx/diagnostics/deque.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/deque.nodiscard.verify.cpp @@ -13,6 +13,32 @@ #include void test() { - std::deque deque; - deque.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::deque d; + const std::deque cd; + + d.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.cbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.cend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.crbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.crend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + d.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.max_size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' 
attribute}} + d.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + d[0]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd[0]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.at(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.at(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + d.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cd.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} } diff --git a/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp index 79b943b790d04..7d75083157aef 100644 --- a/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp @@ -6,15 +6,107 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20 +// REQUIRES: std-at-least-c++23 // -// [[nodiscard]] bool empty() const noexcept; +// Check that functions are marked [[nodiscard]] #include +#include -void f() { - std::flat_map c; - c.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +template +struct TransparentKey { + T t; + + constexpr explicit operator T() const { return t; } +}; + +struct TransparentCompare { + using is_transparent = void; // This makes the comparator transparent + + template + constexpr bool operator()(const T& t, const 
TransparentKey& transparent) const { + return t < transparent.t; + } + + template + constexpr bool operator()(const TransparentKey& transparent, const T& t) const { + return transparent.t < t; + } + + template + constexpr bool operator()(const T& t1, const T& t2) const { + return t1 < t2; + } +}; + +void test() { + std::flat_map fm; + const std::flat_map cfm{}; + + fm.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.cbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.cend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.crbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.crend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.max_size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + 
int key = 0; + TransparentKey tkey; + + std::flat_map nfm; + nfm[key]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm[std::move(key)]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm[std::move(tkey)]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.at(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.at(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.at(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.at(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + std::move(fm).extract(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.key_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.value_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.keys(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.values(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.find(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.find(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.count(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.count(tkey); // expected-warning 
{{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.contains(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.contains(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.contains(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.contains(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.equal_range(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.equal_range(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} } diff --git 
a/libcxx/test/libcxx/diagnostics/flat_set.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/flat_set.nodiscard.verify.cpp index 161fe533eabac..89d60456a20c1 100644 --- a/libcxx/test/libcxx/diagnostics/flat_set.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/flat_set.nodiscard.verify.cpp @@ -6,15 +6,93 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20 +// REQUIRES: std-at-least-c++23 // -// [[nodiscard]] bool empty() const noexcept; +// Check that functions are marked [[nodiscard]] #include +#include -void f() { - std::flat_set c; - c.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +template +struct TransparentKey { + T t; + + constexpr explicit operator T() const { return t; } +}; + +struct TransparentCompare { + using is_transparent = void; // This makes the comparator transparent + + template + constexpr bool operator()(const T& t, const TransparentKey& transparent) const { + return t < transparent.t; + } + + template + constexpr bool operator()(const TransparentKey& transparent, const T& t) const { + return transparent.t < t; + } + + template + constexpr bool operator()(const T& t1, const T& t2) const { + return t1 < t2; + } +}; + +void test() { + std::flat_set fs; + const std::flat_set cfs; + + fs.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.rbegin(); // expected-warning {{ignoring return value of function declared with 
'nodiscard' attribute}} + fs.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.cbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.cend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.crbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.crend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.max_size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + std::move(fs).extract(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.key_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.value_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + int key = 0; + TransparentKey tkey; + + fs.find(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.find(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.count(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.count(tkey); // expected-warning 
{{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.contains(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.contains(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fs.equal_range(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.equal_range(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fs.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfs.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} } diff --git a/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp index 77d3367cc2f4a..da1f9ff3f01f6 100644 --- a/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp +++ 
b/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp @@ -12,12 +12,24 @@ #include -void test_queue() { - std::queue queue; - queue.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} -} +void test() { + { + std::queue q; + const std::queue cq{}; + + q.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cq.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cq.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + + { + std::priority_queue pq; -void test_priority_queue() { - std::priority_queue priority_queue; - priority_queue.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + pq.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + pq.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + pq.top(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } } diff --git a/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp index 524be96736bad..2f5b3ba0fc642 100644 --- a/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp @@ -10,8 +10,6 @@ // check that functions are marked [[nodiscard]] -// clang-format off - #include #include "test_macros.h" @@ -19,15 +17,33 @@ void test() { int i = 0; - std::forward(i); // 
expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::forward(1); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::move(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::move_if_noexcept(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::forward(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::forward(1); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::move(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::move_if_noexcept(i); #if TEST_STD_VER >= 17 std::as_const(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} #endif +#if TEST_STD_VER >= 20 + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_not_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_less(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_greater(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_less_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_greater_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::in_range(49); +#endif + #if TEST_STD_VER >= 23 enum E { 
Apple, Orange } e = Apple; std::to_underlying(e); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} diff --git a/libcxx/test/libcxx/language.support/nodiscard.verify.cpp b/libcxx/test/libcxx/language.support/nodiscard.verify.cpp new file mode 100644 index 0000000000000..b87b04ad9f1ef --- /dev/null +++ b/libcxx/test/libcxx/language.support/nodiscard.verify.cpp @@ -0,0 +1,91 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++03 + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include +#include + +#include "test_macros.h" + +void test() { +#if TEST_STD_VER >= 20 + { // + int x = 94; + int y = 82; + auto oRes = x <=> y; + + std::is_eq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_neq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_lt(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_lteq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_gt(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_gteq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#endif + +#if TEST_STD_VER >= 20 + { // + struct EmptyPromise { + } promise; + + { + std::coroutine_handle cr{}; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.address(); + // expected-warning@+1 
{{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_address(&promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + + std::hash> hash; + hash(cr); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::coroutine_handle cr; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_promise(promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.address(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_address(&promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.promise(); + } + { + std::coroutine_handle cr = std::noop_coroutine(); + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.promise(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.address(); + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::noop_coroutine(); + } + } +#endif + + { // + std::initializer_list il{94, 82, 49}; + + il.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + il.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + il.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +} diff --git 
a/libcxx/test/libcxx/system_reserved_names.gen.py b/libcxx/test/libcxx/system_reserved_names.gen.py index d69182d68e0de..aaede220531d2 100644 --- a/libcxx/test/libcxx/system_reserved_names.gen.py +++ b/libcxx/test/libcxx/system_reserved_names.gen.py @@ -83,7 +83,7 @@ // Test that libc++ doesn't use names that collide with FreeBSD system macros. // newlib and picolibc also define these macros -#if !defined(__FreeBSD__) && !defined(_NEWLIB_VERSION) +#if !defined(__FreeBSD__) && !_LIBCPP_LIBC_NEWLIB # define __null_sentinel SYSTEM_RESERVED_NAME # define __generic SYSTEM_RESERVED_NAME #endif @@ -122,7 +122,7 @@ #endif // Newlib & picolibc use __input as a parameter name of a64l & l64a -#ifndef _NEWLIB_VERSION +#if !_LIBCPP_LIBC_NEWLIB # define __input SYSTEM_RESERVED_NAME #endif #define __output SYSTEM_RESERVED_NAME diff --git a/libcxx/test/libcxx/thread/nodiscard.verify.cpp b/libcxx/test/libcxx/thread/nodiscard.verify.cpp new file mode 100644 index 0000000000000..19e43f88db700 --- /dev/null +++ b/libcxx/test/libcxx/thread/nodiscard.verify.cpp @@ -0,0 +1,144 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++03 +// UNSUPPORTED: no-threads + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include +#include +#include +#include + +#include "test_macros.h" + +const auto timePoint = std::chrono::steady_clock::now(); + +void test() { + // Threads + { + std::thread th; + + th.joinable(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.get_id(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.hardware_concurrency(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#if TEST_STD_VER >= 20 + { + std::jthread jt; + + jt.joinable(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_id(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_stop_source(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_stop_token(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.hardware_concurrency(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#endif + + // Mutual exclusion + + { // + std::mutex m; + + m.try_lock(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + m.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::recursive_mutex m; + + 
m.try_lock(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + m.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::timed_mutex m; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_until(timePoint); + } + { + std::recursive_timed_mutex m; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_until(timePoint); + } + { + std::mutex m1; + std::mutex m2; + std::mutex m3; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::try_lock(m1, m2); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::try_lock(m1, m2, m3); + } + + // Condition variables + + { // + std::condition_variable cv; + + cv.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + +#if TEST_STD_VER >= 20 + + // Semaphores + + { // + std::counting_semaphore<> cs{0}; + + cs.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cs.try_acquire_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function 
declared with 'nodiscard' attribute}} + cs.try_acquire(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cs.try_acquire_until(timePoint); + + std::binary_semaphore bs{0}; + + bs.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire_until(timePoint); + } + + // Latches and barriers + + { // + std::barrier<> b{94}; + + b.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { // + std::latch l{94}; + + l.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + l.try_wait(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + +#endif +} diff --git a/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp b/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp index e8ea20b345e34..98629364654b6 100644 --- a/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp +++ b/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp @@ -97,7 +97,7 @@ constexpr bool test() { TransparentComparator c(transparent_used); std::flat_map m(std::sorted_unique, {{1, 1}, {2, 2}, {3, 3}}, c); assert(!transparent_used); - m[ConvertibleTransparent{3}]; + (void)m[ConvertibleTransparent{3}]; assert(transparent_used); } { diff --git 
a/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/generic_category.pass.cpp b/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/generic_category.pass.cpp index 5425203304014..513af522a6582 100644 --- a/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/generic_category.pass.cpp +++ b/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/generic_category.pass.cpp @@ -48,7 +48,7 @@ int main(int, char**) // responds with an empty message, which we probably want to // treat as a failure code otherwise, but we can detect that // with the preprocessor. -#if defined(_NEWLIB_VERSION) +#if _LIBCPP_LIBC_NEWLIB const bool is_newlib = true; #else const bool is_newlib = false; diff --git a/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/system_category.pass.cpp b/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/system_category.pass.cpp index 255cbe75e2fa9..1803c0a19eb9a 100644 --- a/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/system_category.pass.cpp +++ b/libcxx/test/std/diagnostics/syserr/syserr.errcat/syserr.errcat.objects/system_category.pass.cpp @@ -59,7 +59,7 @@ int main(int, char**) { // responds with an empty message, which we probably want to // treat as a failure code otherwise, but we can detect that // with the preprocessor. -#if defined(_NEWLIB_VERSION) +#if _LIBCPP_LIBC_NEWLIB const bool is_newlib = true; #else const bool is_newlib = false; diff --git a/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp b/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp deleted file mode 100644 index 2ef5cf874da90..0000000000000 --- a/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// UNSUPPORTED: no-threads -// UNSUPPORTED: c++03, c++11, c++14, c++17 - -// [[nodiscard]] bool joinable() const noexcept; -// [[nodiscard]] id get_id() const noexcept; -// [[nodiscard]] native_handle_type native_handle(); -// [[nodiscard]] stop_source get_stop_source() noexcept; -// [[nodiscard]] stop_token get_stop_token() const noexcept; -// [[nodiscard]] static unsigned int hardware_concurrency() noexcept; - -#include - -void test() { - std::jthread jt; - jt.joinable(); // expected-warning {{ignoring return value of function}} - jt.get_id(); // expected-warning {{ignoring return value of function}} - jt.native_handle(); // expected-warning {{ignoring return value of function}} - jt.get_stop_source(); // expected-warning {{ignoring return value of function}} - jt.get_stop_token(); // expected-warning {{ignoring return value of function}} - jt.hardware_concurrency(); // expected-warning {{ignoring return value of function}} -} diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp index a90fecfd075fe..1e951ebdf1d74 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp @@ -13,25 +13,28 @@ // template // constexpr EXPLICIT optional(U&& u); +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" #include "test_convertible.h" - using std::optional; -struct ImplicitThrow -{ - constexpr ImplicitThrow(int x) { if (x != -1) TEST_THROW(6);} +struct ImplicitThrow { + constexpr ImplicitThrow(int x) { + if (x != -1) + TEST_THROW(6); + } }; -struct ExplicitThrow -{ - constexpr 
explicit ExplicitThrow(int x) { if (x != -1) TEST_THROW(6);} +struct ExplicitThrow { + constexpr explicit ExplicitThrow(int x) { + if (x != -1) + TEST_THROW(6); + } }; struct ImplicitAny { @@ -39,56 +42,52 @@ struct ImplicitAny { constexpr ImplicitAny(U&&) {} }; - template -constexpr bool implicit_conversion(optional&& opt, const From& v) -{ - using O = optional; - static_assert(test_convertible(), ""); - static_assert(!test_convertible(), ""); - static_assert(!test_convertible(), ""); - return opt && *opt == static_cast(v); +constexpr bool implicit_conversion(optional&& opt, const From& v) { + using O = optional; + static_assert(test_convertible(), ""); + static_assert(!test_convertible(), ""); + static_assert(!test_convertible(), ""); + return opt && *opt == static_cast(v); } template -constexpr bool explicit_conversion(Input&& in, const Expect& v) -{ - using O = optional; - static_assert(std::is_constructible::value, ""); - static_assert(!std::is_convertible::value, ""); - static_assert(!std::is_constructible::value, ""); - static_assert(!std::is_constructible::value, ""); - optional opt(std::forward(in)); - optional opt2{std::forward(in)}; - return opt && *opt == static_cast(v) && (opt2 && *opt2 == static_cast(v)); +constexpr bool explicit_conversion(Input&& in, const Expect& v) { + using O = optional; + static_assert(std::is_constructible::value, ""); + static_assert(!std::is_convertible::value, ""); + static_assert(!std::is_constructible::value, ""); + static_assert(!std::is_constructible::value, ""); + optional opt(std::forward(in)); + optional opt2{std::forward(in)}; + return opt && *opt == static_cast(v) && (opt2 && *opt2 == static_cast(v)); } -void test_implicit() -{ - { - static_assert(implicit_conversion(42, 42), ""); - } - { - static_assert(implicit_conversion(3.14, 3.14), ""); - } - { - int x = 42; - optional o(&x); - assert(*o == &x); - } - { - using T = TrivialTestTypes::TestType; - static_assert(implicit_conversion(42, 42), ""); - } - { - using T = 
TestTypes::TestType; - assert(implicit_conversion(3, T(3))); - } - { - using T = TestTypes::TestType; - optional opt({3}); - assert(opt && *opt == static_cast(3)); - } +void test_implicit() { + { + static_assert(implicit_conversion(42, 42), ""); + } + { + static_assert(implicit_conversion(3.14, 3.14), ""); + } + { + int x = 42; + optional o(&x); + assert(*o == &x); + } + { + using T = TrivialTestTypes::TestType; + static_assert(implicit_conversion(42, 42), ""); + } + { + using T = TestTypes::TestType; + assert(implicit_conversion(3, T(3))); + } + { + using T = TestTypes::TestType; + optional opt({3}); + assert(opt && *opt == static_cast(3)); + } { using O = optional; static_assert(!test_convertible(), ""); @@ -96,64 +95,63 @@ void test_implicit() static_assert(!test_convertible(), ""); static_assert(!test_convertible(), ""); static_assert(!test_convertible(), ""); - } #ifndef TEST_HAS_NO_EXCEPTIONS - { - try { - using T = ImplicitThrow; - optional t = 42; - assert(false); - ((void)t); - } catch (int) { - } + { + try { + using T = ImplicitThrow; + optional t = 42; + assert(false); + ((void)t); + } catch (int) { } + } #endif } void test_explicit() { + { + using T = ExplicitTrivialTestTypes::TestType; + static_assert(explicit_conversion(42, 42), ""); + } + { + using T = ExplicitConstexprTestTypes::TestType; + static_assert(explicit_conversion(42, 42), ""); + static_assert(!std::is_convertible::value, ""); + } + { + using T = ExplicitTestTypes::TestType; + T::reset(); { - using T = ExplicitTrivialTestTypes::TestType; - static_assert(explicit_conversion(42, 42), ""); - } - { - using T = ExplicitConstexprTestTypes::TestType; - static_assert(explicit_conversion(42, 42), ""); - static_assert(!std::is_convertible::value, ""); + assert(explicit_conversion(42, 42)); + assert(T::alive == 0); } + T::reset(); { - using T = ExplicitTestTypes::TestType; - T::reset(); - { - assert(explicit_conversion(42, 42)); - assert(T::alive == 0); - } - T::reset(); - { - optional t(42); - 
assert(T::alive == 1); - assert(T::value_constructed == 1); - assert(T::move_constructed == 0); - assert(T::copy_constructed == 0); - assert(t.value().value == 42); - } - assert(T::alive == 0); + optional t(42); + assert(T::alive == 1); + assert(T::value_constructed == 1); + assert(T::move_constructed == 0); + assert(T::copy_constructed == 0); + assert(t.value().value == 42); } + assert(T::alive == 0); + } #ifndef TEST_HAS_NO_EXCEPTIONS - { - try { - using T = ExplicitThrow; - optional t(42); - assert(false); - } catch (int) { - } + { + try { + using T = ExplicitThrow; + optional t(42); + assert(false); + } catch (int) { } + } #endif } int main(int, char**) { - test_implicit(); - test_explicit(); + test_implicit(); + test_explicit(); return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_T.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_T.pass.cpp index 91a2323eebbf4..67d0fcfc18b86 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_T.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_T.pass.cpp @@ -12,117 +12,102 @@ // constexpr optional(const T& v); +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" using std::optional; -int main(int, char**) -{ - { - typedef int T; - constexpr T t(5); - constexpr optional opt(t); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == 5, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; - - } - { - typedef double T; - constexpr T t(3); - constexpr optional opt(t); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == 3, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; +int main(int, char**) { + { + typedef int T; + constexpr T t(5); + constexpr optional opt(t); + 
static_assert(static_cast(opt) == true, ""); + static_assert(*opt == 5, ""); - } - { - const int x = 42; - optional o(x); - assert(*o == x); - } - { - typedef TestTypes::TestType T; - T::reset(); - const T t(3); - optional opt = t; - assert(T::alive == 2); - assert(T::copy_constructed == 1); - assert(static_cast(opt) == true); - assert(opt.value().value == 3); - } - { - typedef ExplicitTestTypes::TestType T; - static_assert(!std::is_convertible>::value, ""); - T::reset(); - const T t(3); - optional opt(t); - assert(T::alive == 2); - assert(T::copy_constructed == 1); - assert(static_cast(opt) == true); - assert(opt.value().value == 3); - } - { - typedef ConstexprTestTypes::TestType T; - constexpr T t(3); - constexpr optional opt = {t}; - static_assert(static_cast(opt) == true, ""); - static_assert(opt.value().value == 3, ""); + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } + { + typedef double T; + constexpr T t(3); + constexpr optional opt(t); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == 3, ""); - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; - } - { - typedef ExplicitConstexprTestTypes::TestType T; - static_assert(!std::is_convertible>::value, ""); - constexpr T t(3); - constexpr optional opt(t); - static_assert(static_cast(opt) == true, ""); - static_assert(opt.value().value == 3, ""); + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } + { + const int x = 42; + optional o(x); + assert(*o == x); + } + { + typedef TestTypes::TestType T; + T::reset(); + const T t(3); + optional opt = t; + assert(T::alive == 2); + assert(T::copy_constructed == 1); + assert(static_cast(opt) == true); + assert(opt.value().value == 3); + } + { + typedef ExplicitTestTypes::TestType T; + static_assert(!std::is_convertible>::value, ""); + T::reset(); + const T t(3); + optional opt(t); + assert(T::alive == 
2); + assert(T::copy_constructed == 1); + assert(static_cast(opt) == true); + assert(opt.value().value == 3); + } + { + typedef ConstexprTestTypes::TestType T; + constexpr T t(3); + constexpr optional opt = {t}; + static_assert(static_cast(opt) == true, ""); + static_assert(opt.value().value == 3, ""); - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } + { + typedef ExplicitConstexprTestTypes::TestType T; + static_assert(!std::is_convertible>::value, ""); + constexpr T t(3); + constexpr optional opt(t); + static_assert(static_cast(opt) == true, ""); + static_assert(opt.value().value == 3, ""); - } + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } #ifndef TEST_HAS_NO_EXCEPTIONS - { - struct Z { - Z(int) {} - Z(const Z&) {throw 6;} - }; - typedef Z T; - try - { - const T t(3); - optional opt(t); - assert(false); - } - catch (int i) - { - assert(i == 6); - } + { + struct Z { + Z(int) {} + Z(const Z&) { throw 6; } + }; + typedef Z T; + try { + const T t(3); + optional opt(t); + assert(false); + } catch (int i) { + assert(i == 6); } + } #endif return 0; diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_optional_U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_optional_U.pass.cpp index 9505238e6e5e2..70fd76ec6ed0b 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_optional_U.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/const_optional_U.pass.cpp @@ -12,74 +12,69 @@ // template // optional(const optional& rhs); +#include #include #include -#include #include "test_macros.h" using std::optional; template -TEST_CONSTEXPR_CXX20 void -test(const optional& rhs, bool is_going_to_throw = false) -{ - bool 
rhs_engaged = static_cast(rhs); +TEST_CONSTEXPR_CXX20 void test(const optional& rhs, bool is_going_to_throw = false) { + bool rhs_engaged = static_cast(rhs); #ifndef TEST_HAS_NO_EXCEPTIONS - try - { - optional lhs = rhs; - assert(is_going_to_throw == false); - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(*lhs == *rhs); - } - catch (int i) - { - assert(i == 6); - } -#else - if (is_going_to_throw) return; + try { optional lhs = rhs; + assert(is_going_to_throw == false); assert(static_cast(lhs) == rhs_engaged); if (rhs_engaged) - assert(*lhs == *rhs); + assert(*lhs == *rhs); + } catch (int i) { + assert(i == 6); + } +#else + if (is_going_to_throw) + return; + optional lhs = rhs; + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(*lhs == *rhs); #endif } -class X -{ - int i_; +class X { + int i_; + public: - constexpr X(int i) : i_(i) {} - constexpr X(const X& x) : i_(x.i_) {} - TEST_CONSTEXPR_CXX20 ~X() {i_ = 0;} - friend constexpr bool operator==(const X& x, const X& y) {return x.i_ == y.i_;} + constexpr X(int i) : i_(i) {} + constexpr X(const X& x) : i_(x.i_) {} + TEST_CONSTEXPR_CXX20 ~X() { i_ = 0; } + friend constexpr bool operator==(const X& x, const X& y) { return x.i_ == y.i_; } }; -class Y -{ - int i_; +class Y { + int i_; + public: - constexpr Y(int i) : i_(i) {} + constexpr Y(int i) : i_(i) {} - friend constexpr bool operator==(const Y& x, const Y& y) {return x.i_ == y.i_;} + friend constexpr bool operator==(const Y& x, const Y& y) { return x.i_ == y.i_; } }; int count = 0; -class Z -{ - int i_; +class Z { + int i_; + public: - Z(int i) : i_(i) {TEST_THROW(6);} + Z(int i) : i_(i) { TEST_THROW(6); } - friend bool operator==(const Z& x, const Z& y) {return x.i_ == y.i_;} + friend bool operator==(const Z& x, const Z& y) { return x.i_ == y.i_; } }; -template -constexpr bool test_all() -{ +template +constexpr bool test_all() { { optional rhs; test(rhs); @@ -91,30 +86,29 @@ constexpr bool test_all() return true; } -int 
main(int, char**) -{ - test_all(); - test_all(); - test_all(); +int main(int, char**) { + test_all(); + test_all(); + test_all(); #if TEST_STD_VER > 17 - static_assert(test_all()); - static_assert(test_all()); - static_assert(test_all()); + static_assert(test_all()); + static_assert(test_all()); + static_assert(test_all()); #endif - { - typedef Z T; - typedef int U; - optional rhs; - test(rhs); - } - { - typedef Z T; - typedef int U; - optional rhs(U{3}); - test(rhs, true); - } - - static_assert(!(std::is_constructible, const optional&>::value), ""); + { + typedef Z T; + typedef int U; + optional rhs; + test(rhs); + } + { + typedef Z T; + typedef int U; + optional rhs(U{3}); + test(rhs, true); + } + + static_assert(!(std::is_constructible, const optional&>::value), ""); return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/copy.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/copy.pass.cpp index 54a424c4c347d..f61a22c23a04d 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/copy.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/copy.pass.cpp @@ -11,173 +11,165 @@ // constexpr optional(const optional& rhs); +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" using std::optional; -template -void test(InitArgs&&... args) -{ - const optional rhs(std::forward(args)...); - bool rhs_engaged = static_cast(rhs); - optional lhs = rhs; - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(*lhs == *rhs); +template +void test(InitArgs&&... args) { + const optional rhs(std::forward(args)...); + bool rhs_engaged = static_cast(rhs); + optional lhs = rhs; + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(*lhs == *rhs); } -template -constexpr bool constexpr_test(InitArgs&&... 
args) -{ - static_assert( std::is_trivially_copy_constructible_v, ""); // requirement - const optional rhs(std::forward(args)...); - optional lhs = rhs; - return (lhs.has_value() == rhs.has_value()) && - (lhs.has_value() ? *lhs == *rhs : true); +template +constexpr bool constexpr_test(InitArgs&&... args) { + static_assert(std::is_trivially_copy_constructible_v, ""); // requirement + const optional rhs(std::forward(args)...); + optional lhs = rhs; + return (lhs.has_value() == rhs.has_value()) && (lhs.has_value() ? *lhs == *rhs : true); } void test_throwing_ctor() { #ifndef TEST_HAS_NO_EXCEPTIONS - struct Z { - Z() : count(0) {} - Z(Z const& o) : count(o.count + 1) - { if (count == 2) throw 6; } - int count; - }; - const Z z; - const optional rhs(z); - try - { - optional lhs(rhs); - assert(false); - } - catch (int i) - { - assert(i == 6); + struct Z { + Z() : count(0) {} + Z(Z const& o) : count(o.count + 1) { + if (count == 2) + throw 6; } + int count; + }; + const Z z; + const optional rhs(z); + try { + optional lhs(rhs); + assert(false); + } catch (int i) { + assert(i == 6); + } #endif } -template -void test_ref(InitArgs&&... args) -{ - const optional rhs(std::forward(args)...); - bool rhs_engaged = static_cast(rhs); - optional lhs = rhs; - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(&(*lhs) == &(*rhs)); +template +void test_ref(InitArgs&&... args) { + const optional rhs(std::forward(args)...); + bool rhs_engaged = static_cast(rhs); + optional lhs = rhs; + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(&(*lhs) == &(*rhs)); } - -void test_reference_extension() -{ +void test_reference_extension() { #if defined(_LIBCPP_VERSION) && 0 // FIXME these extensions are currently disabled. 
- using T = TestTypes::TestType; - T::reset(); - { - T t; - T::reset_constructors(); - test_ref(); - test_ref(t); - assert(T::alive == 1); - assert(T::constructed == 0); - assert(T::assigned == 0); - assert(T::destroyed == 0); - } - assert(T::destroyed == 1); - assert(T::alive == 0); - { - T t; - const T& ct = t; - T::reset_constructors(); - test_ref(); - test_ref(t); - test_ref(ct); - assert(T::alive == 1); - assert(T::constructed == 0); - assert(T::assigned == 0); - assert(T::destroyed == 0); - } - assert(T::alive == 0); - assert(T::destroyed == 1); - { - static_assert(!std::is_copy_constructible>::value, ""); - static_assert(!std::is_copy_constructible>::value, ""); - } + using T = TestTypes::TestType; + T::reset(); + { + T t; + T::reset_constructors(); + test_ref(); + test_ref(t); + assert(T::alive == 1); + assert(T::constructed == 0); + assert(T::assigned == 0); + assert(T::destroyed == 0); + } + assert(T::destroyed == 1); + assert(T::alive == 0); + { + T t; + const T& ct = t; + T::reset_constructors(); + test_ref(); + test_ref(t); + test_ref(ct); + assert(T::alive == 1); + assert(T::constructed == 0); + assert(T::assigned == 0); + assert(T::destroyed == 0); + } + assert(T::alive == 0); + assert(T::destroyed == 1); + { + static_assert(!std::is_copy_constructible>::value, ""); + static_assert(!std::is_copy_constructible>::value, ""); + } #endif } -int main(int, char**) -{ - test(); - test(3); - static_assert(constexpr_test(), "" ); - static_assert(constexpr_test(3), "" ); +int main(int, char**) { + test(); + test(3); + static_assert(constexpr_test(), ""); + static_assert(constexpr_test(3), ""); - { - const optional o(42); - optional o2(o); - assert(*o2 == 42); - } - { - using T = TestTypes::TestType; - T::reset(); - const optional rhs; - assert(T::alive == 0); - const optional lhs(rhs); - assert(lhs.has_value() == false); - assert(T::alive == 0); - } - TestTypes::TestType::reset(); - { - using T = TestTypes::TestType; - T::reset(); - const optional rhs(42); - 
assert(T::alive == 1); - assert(T::value_constructed == 1); - assert(T::copy_constructed == 0); - const optional lhs(rhs); - assert(lhs.has_value()); - assert(T::copy_constructed == 1); - assert(T::alive == 2); - } - TestTypes::TestType::reset(); - { - using namespace ConstexprTestTypes; - test(); - test(42); - } - { - using namespace TrivialTestTypes; - test(); - test(42); - } - { - test_throwing_ctor(); - } - { - test_reference_extension(); - } - { - constexpr std::optional o1{4}; - constexpr std::optional o2 = o1; - static_assert( *o2 == 4, "" ); - } + { + const optional o(42); + optional o2(o); + assert(*o2 == 42); + } + { + using T = TestTypes::TestType; + T::reset(); + const optional rhs; + assert(T::alive == 0); + const optional lhs(rhs); + assert(lhs.has_value() == false); + assert(T::alive == 0); + } + TestTypes::TestType::reset(); + { + using T = TestTypes::TestType; + T::reset(); + const optional rhs(42); + assert(T::alive == 1); + assert(T::value_constructed == 1); + assert(T::copy_constructed == 0); + const optional lhs(rhs); + assert(lhs.has_value()); + assert(T::copy_constructed == 1); + assert(T::alive == 2); + } + TestTypes::TestType::reset(); + { + using namespace ConstexprTestTypes; + test(); + test(42); + } + { + using namespace TrivialTestTypes; + test(); + test(42); + } + { + test_throwing_ctor(); + } + { + test_reference_extension(); + } + { + constexpr std::optional o1{4}; + constexpr std::optional o2 = o1; + static_assert(*o2 == 4, ""); + } - // LWG3836 https://wg21.link/LWG3836 - // std::optional conversion constructor optional(const optional&) - // should take precedence over optional(U&&) with operator bool - { - std::optional o1(false); - std::optional o2(o1); - assert(!o2.value()); - } + // LWG3836 https://wg21.link/LWG3836 + // std::optional conversion constructor optional(const optional&) + // should take precedence over optional(U&&) with operator bool + { + std::optional o1(false); + std::optional o2(o1); + assert(!o2.value()); + } 
return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/ctor.verify.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/ctor.verify.cpp index c5281783d4350..00ca941668eb2 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/ctor.verify.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/ctor.verify.cpp @@ -13,28 +13,25 @@ // and shall satisfy the Cpp17Destructible requirements. // Note: array types do not satisfy the Cpp17Destructible requirements. -#include -#include #include +#include #include "test_macros.h" -struct NonDestructible { ~NonDestructible() = delete; }; +struct NonDestructible { + ~NonDestructible() = delete; +}; -int main(int, char**) -{ +int main(int, char**) { + // clang-format off { #if TEST_STD_VER >= 26 - std::optional - opt2; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with an rvalue reference type is ill-formed}} + std::optional opt2; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with an rvalue reference type is ill-formed}} #else - std::optional - o1; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with a reference type is ill-formed}} + std::optional o1; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with a reference type is ill-formed}} #endif - std::optional - o2; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with a non-destructible type is ill-formed}} - std::optional - o3; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with an array type is ill-formed}} + std::optional o2; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with a non-destructible type is ill-formed}} + std::optional o3; // expected-error-re@optional:* 
{{static assertion failed{{.*}}instantiation of optional with an array type is ill-formed}} } { @@ -44,12 +41,12 @@ int main(int, char**) std::optional o4; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with in_place_t is ill-formed}} } - { + { std::optional< std::nullopt_t> o1; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} std::optional o2; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} std::optional< volatile std::nullopt_t> o3; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} std::optional o4; // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} - } - - return 0; + } + // clang-format on + return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.pass.cpp index 9bfde5abaa9ac..bc1d26aa8bd18 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.pass.cpp @@ -12,67 +12,66 @@ // template // optional(T) -> optional; -#include #include +#include #include "test_macros.h" struct A {}; -int main(int, char**) -{ -// Test the explicit deduction guides - { -// optional(T) +int main(int, char**) { + // Test the explicit deduction guides + { + // optional(T) std::optional opt(5); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt)); assert(*opt == 5); - } + } - { -// optional(T) + { + // optional(T) std::optional opt(A{}); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt)); - } + } - { -// optional(const T&); + { + // optional(const T&); const int& 
source = 5; std::optional opt(source); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt)); assert(*opt == 5); - } + } - { -// optional(T*); + { + // optional(T*); const int* source = nullptr; std::optional opt(source); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt)); assert(*opt == nullptr); - } + } - { -// optional(T[]); + { + // optional(T[]); int source[] = {1, 2, 3}; std::optional opt(source); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt)); assert((*opt)[0] == 1); - } + } -// Test the implicit deduction guides - { -// optional(optional); + // Test the implicit deduction guides + { + // optional(optional); std::optional source('A'); std::optional opt(source); ASSERT_SAME_TYPE(decltype(opt), std::optional); assert(static_cast(opt) == static_cast(source)); assert(*opt == *source); - } + } return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.verify.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.verify.cpp index 364f9b2e955f0..7ab842bf68c51 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.verify.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/deduct.verify.cpp @@ -13,25 +13,26 @@ // template // optional(T) -> optional; -#include #include +#include struct A {}; -int main(int, char**) -{ -// Test the explicit deduction guides +int main(int, char**) { + // Test the explicit deduction guides -// Test the implicit deduction guides - { -// optional() - std::optional opt; // expected-error-re {{no viable constructor or deduction guide for deduction of template arguments of '{{(std::)?}}optional'}} - } + // Test the implicit deduction guides - { -// optional(nullopt_t) - std::optional opt(std::nullopt); // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} - } + // 
clang-format off + { + // optional() + std::optional opt; // expected-error-re {{no viable constructor or deduction guide for deduction of template arguments of '{{(std::)?}}optional'}} + } + { + // optional(nullopt_t) + std::optional opt(std::nullopt); // expected-error-re@optional:* {{static assertion failed{{.*}}instantiation of optional with nullopt_t is ill-formed}} + } + // clang-format on return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/default.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/default.pass.cpp index 61a365edb64ea..71d4d052da3b7 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/default.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/default.pass.cpp @@ -11,9 +11,9 @@ // constexpr optional() noexcept; +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" @@ -21,61 +21,52 @@ using std::optional; template -void -test_constexpr() -{ - static_assert(std::is_nothrow_default_constructible::value, ""); - static_assert(std::is_trivially_destructible::value, ""); - static_assert(std::is_trivially_destructible::value, ""); +void test_constexpr() { + static_assert(std::is_nothrow_default_constructible::value, ""); + static_assert(std::is_trivially_destructible::value, ""); + static_assert(std::is_trivially_destructible::value, ""); - constexpr Opt opt; - static_assert(static_cast(opt) == false, ""); + constexpr Opt opt; + static_assert(static_cast(opt) == false, ""); - struct test_constexpr_ctor - : public Opt - { - constexpr test_constexpr_ctor() {} - }; + struct test_constexpr_ctor : public Opt { + constexpr test_constexpr_ctor() {} + }; } template -void -test() -{ - static_assert(std::is_nothrow_default_constructible::value, ""); - static_assert(!std::is_trivially_destructible::value, ""); - static_assert(!std::is_trivially_destructible::value, ""); - { - Opt opt; - 
assert(static_cast(opt) == false); - } - { - const Opt opt; - assert(static_cast(opt) == false); - } +void test() { + static_assert(std::is_nothrow_default_constructible::value, ""); + static_assert(!std::is_trivially_destructible::value, ""); + static_assert(!std::is_trivially_destructible::value, ""); + { + Opt opt; + assert(static_cast(opt) == false); + } + { + const Opt opt; + assert(static_cast(opt) == false); + } - struct test_constexpr_ctor - : public Opt - { - constexpr test_constexpr_ctor() {} - }; + struct test_constexpr_ctor : public Opt { + constexpr test_constexpr_ctor() {} + }; } -int main(int, char**) -{ - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test>(); - // EXTENSIONS +int main(int, char**) { + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test>(); + // EXTENSIONS #if defined(_LIBCPP_VERSION) && 0 // FIXME these extensions are currently disabled. - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); #endif return 0; diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/empty_in_place_t_does_not_clobber.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/empty_in_place_t_does_not_clobber.pass.cpp index 594aac770bc82..f19174841813c 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/empty_in_place_t_does_not_clobber.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/empty_in_place_t_does_not_clobber.pass.cpp @@ -15,9 +15,9 @@ // in_place_t constructor with no arguments when the Clang is trying to check // copy constructor. 
+#include #include #include -#include #include "test_macros.h" #include "archetypes.h" diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_const_optional_U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_const_optional_U.pass.cpp index d8594bc03b132..1b9882fb25633 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_const_optional_U.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_const_optional_U.pass.cpp @@ -12,75 +12,70 @@ // template // explicit optional(const optional& rhs); +#include #include #include -#include #include "test_macros.h" using std::optional; template -TEST_CONSTEXPR_CXX20 void -test(const optional& rhs, bool is_going_to_throw = false) -{ - static_assert(!(std::is_convertible&, optional>::value), ""); - bool rhs_engaged = static_cast(rhs); +TEST_CONSTEXPR_CXX20 void test(const optional& rhs, bool is_going_to_throw = false) { + static_assert(!(std::is_convertible&, optional>::value), ""); + bool rhs_engaged = static_cast(rhs); #ifndef TEST_HAS_NO_EXCEPTIONS - try - { - optional lhs(rhs); - assert(is_going_to_throw == false); - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(*lhs == T(*rhs)); - } - catch (int i) - { - assert(i == 6); - } -#else - if (is_going_to_throw) return; + try { optional lhs(rhs); + assert(is_going_to_throw == false); assert(static_cast(lhs) == rhs_engaged); if (rhs_engaged) - assert(*lhs == T(*rhs)); + assert(*lhs == T(*rhs)); + } catch (int i) { + assert(i == 6); + } +#else + if (is_going_to_throw) + return; + optional lhs(rhs); + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(*lhs == T(*rhs)); #endif } -class X -{ - int i_; +class X { + int i_; + public: - constexpr explicit X(int i) : i_(i) {} - constexpr X(const X& x) : i_(x.i_) {} - TEST_CONSTEXPR_CXX20 ~X() {i_ = 0;} - friend constexpr bool 
operator==(const X& x, const X& y) {return x.i_ == y.i_;} + constexpr explicit X(int i) : i_(i) {} + constexpr X(const X& x) : i_(x.i_) {} + TEST_CONSTEXPR_CXX20 ~X() { i_ = 0; } + friend constexpr bool operator==(const X& x, const X& y) { return x.i_ == y.i_; } }; -class Y -{ - int i_; +class Y { + int i_; + public: - constexpr explicit Y(int i) : i_(i) {} + constexpr explicit Y(int i) : i_(i) {} - friend constexpr bool operator==(const Y& x, const Y& y) {return x.i_ == y.i_;} + friend constexpr bool operator==(const Y& x, const Y& y) { return x.i_ == y.i_; } }; int count = 0; -class Z -{ - int i_; +class Z { + int i_; + public: - explicit Z(int i) : i_(i) {TEST_THROW(6);} + explicit Z(int i) : i_(i) { TEST_THROW(6); } - friend bool operator==(const Z& x, const Z& y) {return x.i_ == y.i_;} + friend bool operator==(const Z& x, const Z& y) { return x.i_ == y.i_; } }; -template -constexpr bool test_all() -{ +template +constexpr bool test_all() { { optional rhs; test(rhs); @@ -92,27 +87,25 @@ constexpr bool test_all() return true; } - -int main(int, char**) -{ - test_all(); - test_all(); +int main(int, char**) { + test_all(); + test_all(); #if TEST_STD_VER > 17 - static_assert(test_all()); - static_assert(test_all()); + static_assert(test_all()); + static_assert(test_all()); #endif - { - typedef Z T; - typedef int U; - optional rhs; - test(rhs); - } - { - typedef Z T; - typedef int U; - optional rhs(3); - test(rhs, true); - } + { + typedef Z T; + typedef int U; + optional rhs; + test(rhs); + } + { + typedef Z T; + typedef int U; + optional rhs(3); + test(rhs, true); + } return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_optional_U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_optional_U.pass.cpp index 708370a47b616..bddbd4ba93d5a 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_optional_U.pass.cpp +++ 
b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/explicit_optional_U.pass.cpp @@ -12,83 +12,77 @@ // template // explicit optional(optional&& rhs); +#include #include #include -#include #include "test_macros.h" using std::optional; template -TEST_CONSTEXPR_CXX20 void test(optional&& rhs, bool is_going_to_throw = false) -{ - static_assert(!(std::is_convertible&&, optional>::value), ""); - bool rhs_engaged = static_cast(rhs); +TEST_CONSTEXPR_CXX20 void test(optional&& rhs, bool is_going_to_throw = false) { + static_assert(!(std::is_convertible&&, optional>::value), ""); + bool rhs_engaged = static_cast(rhs); #ifndef TEST_HAS_NO_EXCEPTIONS - try - { - optional lhs(std::move(rhs)); - assert(is_going_to_throw == false); - assert(static_cast(lhs) == rhs_engaged); - } - catch (int i) - { - assert(i == 6); - } -#else - if (is_going_to_throw) return; + try { optional lhs(std::move(rhs)); + assert(is_going_to_throw == false); assert(static_cast(lhs) == rhs_engaged); + } catch (int i) { + assert(i == 6); + } +#else + if (is_going_to_throw) + return; + optional lhs(std::move(rhs)); + assert(static_cast(lhs) == rhs_engaged); #endif } -class X -{ - int i_; +class X { + int i_; + public: - constexpr explicit X(int i) : i_(i) {} - constexpr X(X&& x) : i_(x.i_) { x.i_ = 0; } - TEST_CONSTEXPR_CXX20 ~X() {i_ = 0;} - friend constexpr bool operator==(const X& x, const X& y) {return x.i_ == y.i_;} + constexpr explicit X(int i) : i_(i) {} + constexpr X(X&& x) : i_(x.i_) { x.i_ = 0; } + TEST_CONSTEXPR_CXX20 ~X() { i_ = 0; } + friend constexpr bool operator==(const X& x, const X& y) { return x.i_ == y.i_; } }; int count = 0; -class Z -{ +class Z { public: - explicit Z(int) { TEST_THROW(6); } + explicit Z(int) { TEST_THROW(6); } }; -TEST_CONSTEXPR_CXX20 bool test() -{ - { - optional rhs; - test(std::move(rhs)); - } - { - optional rhs(3); - test(std::move(rhs)); - } +TEST_CONSTEXPR_CXX20 bool test() { + { + optional rhs; + test(std::move(rhs)); + } + { + optional 
rhs(3); + test(std::move(rhs)); + } - return true; + return true; } -int main(int, char**) -{ +int main(int, char**) { #if TEST_STD_VER > 17 - static_assert(test()); + static_assert(test()); #endif - test(); - { - optional rhs; - test(std::move(rhs)); - } - { - optional rhs(3); - test(std::move(rhs), true); - } + test(); + { + optional rhs; + test(std::move(rhs)); + } + { + optional rhs(3); + test(std::move(rhs), true); + } return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/in_place_t.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/in_place_t.pass.cpp index 65276c5a01976..902754418fbde 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/in_place_t.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/in_place_t.pass.cpp @@ -13,136 +13,112 @@ // template // constexpr explicit optional(in_place_t, Args&&... args); +#include #include #include -#include #include "test_macros.h" -using std::optional; -using std::in_place_t; using std::in_place; +using std::in_place_t; +using std::optional; + +class X { + int i_; + int j_ = 0; -class X -{ - int i_; - int j_ = 0; public: - X() : i_(0) {} - X(int i) : i_(i) {} - X(int i, int j) : i_(i), j_(j) {} + X() : i_(0) {} + X(int i) : i_(i) {} + X(int i, int j) : i_(i), j_(j) {} - ~X() {} + ~X() {} - friend bool operator==(const X& x, const X& y) - {return x.i_ == y.i_ && x.j_ == y.j_;} + friend bool operator==(const X& x, const X& y) { return x.i_ == y.i_ && x.j_ == y.j_; } }; -class Y -{ - int i_; - int j_ = 0; +class Y { + int i_; + int j_ = 0; + public: - constexpr Y() : i_(0) {} - constexpr Y(int i) : i_(i) {} - constexpr Y(int i, int j) : i_(i), j_(j) {} + constexpr Y() : i_(0) {} + constexpr Y(int i) : i_(i) {} + constexpr Y(int i, int j) : i_(i), j_(j) {} - friend constexpr bool operator==(const Y& x, const Y& y) - {return x.i_ == y.i_ && x.j_ == y.j_;} + friend constexpr bool 
operator==(const Y& x, const Y& y) { return x.i_ == y.i_ && x.j_ == y.j_; } }; -class Z -{ +class Z { public: - Z(int) {TEST_THROW(6);} + Z(int) { TEST_THROW(6); } }; - -int main(int, char**) -{ - { - constexpr optional opt(in_place, 5); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == 5, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(in_place_t, int i) - : optional(in_place, i) {} - }; - - } - { - optional opt(in_place, 5); - assert(*opt == 5); - } - { - const optional opt(in_place); - assert(static_cast(opt) == true); - assert(*opt == X()); - } - { - const optional opt(in_place, 5); - assert(static_cast(opt) == true); - assert(*opt == X(5)); - } - { - const optional opt(in_place, 5, 4); - assert(static_cast(opt) == true); - assert(*opt == X(5, 4)); - } - { - constexpr optional opt(in_place); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == Y(), ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(in_place_t) - : optional(in_place) {} - }; - - } - { - constexpr optional opt(in_place, 5); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == Y(5), ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(in_place_t, int i) - : optional(in_place, i) {} - }; - - } - { - constexpr optional opt(in_place, 5, 4); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == Y(5, 4), ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(in_place_t, int i, int j) - : optional(in_place, i, j) {} - }; - - } +int main(int, char**) { + { + constexpr optional opt(in_place, 5); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == 5, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(in_place_t, int i) : optional(in_place, i) {} + }; + } + { + optional opt(in_place, 5); + assert(*opt == 5); + } + { + 
const optional opt(in_place); + assert(static_cast(opt) == true); + assert(*opt == X()); + } + { + const optional opt(in_place, 5); + assert(static_cast(opt) == true); + assert(*opt == X(5)); + } + { + const optional opt(in_place, 5, 4); + assert(static_cast(opt) == true); + assert(*opt == X(5, 4)); + } + { + constexpr optional opt(in_place); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == Y(), ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(in_place_t) : optional(in_place) {} + }; + } + { + constexpr optional opt(in_place, 5); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == Y(5), ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(in_place_t, int i) : optional(in_place, i) {} + }; + } + { + constexpr optional opt(in_place, 5, 4); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == Y(5, 4), ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(in_place_t, int i, int j) : optional(in_place, i, j) {} + }; + } #ifndef TEST_HAS_NO_EXCEPTIONS - { - try - { - const optional opt(in_place, 1); - assert(false); - } - catch (int i) - { - assert(i == 6); - } + { + try { + const optional opt(in_place, 1); + assert(false); + } catch (int i) { + assert(i == 6); } + } #endif return 0; diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/initializer_list.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/initializer_list.pass.cpp index 6c42df9e1e097..1993476792878 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/initializer_list.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/initializer_list.pass.cpp @@ -13,105 +13,93 @@ // constexpr // explicit optional(in_place_t, initializer_list il, Args&&... 
args); +#include +#include #include #include -#include #include -#include #include "test_macros.h" -using std::optional; -using std::in_place_t; using std::in_place; +using std::in_place_t; +using std::optional; + +class X { + int i_; + int j_ = 0; -class X -{ - int i_; - int j_ = 0; public: - X() : i_(0) {} - X(int i) : i_(i) {} - X(int i, int j) : i_(i), j_(j) {} + X() : i_(0) {} + X(int i) : i_(i) {} + X(int i, int j) : i_(i), j_(j) {} - ~X() {} + ~X() {} - friend bool operator==(const X& x, const X& y) - {return x.i_ == y.i_ && x.j_ == y.j_;} + friend bool operator==(const X& x, const X& y) { return x.i_ == y.i_ && x.j_ == y.j_; } }; -class Y -{ - int i_; - int j_ = 0; +class Y { + int i_; + int j_ = 0; + public: - constexpr Y() : i_(0) {} - constexpr Y(int i) : i_(i) {} - constexpr Y(std::initializer_list il) : i_(il.begin()[0]), j_(il.begin()[1]) {} + constexpr Y() : i_(0) {} + constexpr Y(int i) : i_(i) {} + constexpr Y(std::initializer_list il) : i_(il.begin()[0]), j_(il.begin()[1]) {} - friend constexpr bool operator==(const Y& x, const Y& y) - {return x.i_ == y.i_ && x.j_ == y.j_;} + friend constexpr bool operator==(const Y& x, const Y& y) { return x.i_ == y.i_ && x.j_ == y.j_; } }; -class Z -{ - int i_; - int j_ = 0; +class Z { + int i_; + int j_ = 0; + public: - Z() : i_(0) {} - Z(int i) : i_(i) {} - Z(std::initializer_list il) : i_(il.begin()[0]), j_(il.begin()[1]) - {TEST_THROW(6);} + Z() : i_(0) {} + Z(int i) : i_(i) {} + Z(std::initializer_list il) : i_(il.begin()[0]), j_(il.begin()[1]) { TEST_THROW(6); } - friend bool operator==(const Z& x, const Z& y) - {return x.i_ == y.i_ && x.j_ == y.j_;} + friend bool operator==(const Z& x, const Z& y) { return x.i_ == y.i_ && x.j_ == y.j_; } }; -int main(int, char**) -{ - { - static_assert(!std::is_constructible&>::value, ""); - static_assert(!std::is_constructible, std::initializer_list&>::value, ""); - } - { - optional> opt(in_place, {3, 1}); - assert(static_cast(opt) == true); - assert((*opt == 
std::vector{3, 1})); - assert(opt->size() == 2); - } - { - optional> opt(in_place, {3, 1}, std::allocator()); - assert(static_cast(opt) == true); - assert((*opt == std::vector{3, 1})); - assert(opt->size() == 2); - } - { - static_assert(std::is_constructible, std::initializer_list&>::value, ""); - constexpr optional opt(in_place, {3, 1}); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == Y{3, 1}, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(in_place_t, std::initializer_list i) - : optional(in_place, i) {} - }; - - } +int main(int, char**) { + { + static_assert(!std::is_constructible&>::value, ""); + static_assert(!std::is_constructible, std::initializer_list&>::value, ""); + } + { + optional> opt(in_place, {3, 1}); + assert(static_cast(opt) == true); + assert((*opt == std::vector{3, 1})); + assert(opt->size() == 2); + } + { + optional> opt(in_place, {3, 1}, std::allocator()); + assert(static_cast(opt) == true); + assert((*opt == std::vector{3, 1})); + assert(opt->size() == 2); + } + { + static_assert(std::is_constructible, std::initializer_list&>::value, ""); + constexpr optional opt(in_place, {3, 1}); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == Y{3, 1}, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(in_place_t, std::initializer_list i) : optional(in_place, i) {} + }; + } #ifndef TEST_HAS_NO_EXCEPTIONS - { - static_assert(std::is_constructible, std::initializer_list&>::value, ""); - try - { - optional opt(in_place, {3, 1}); - assert(false); - } - catch (int i) - { - assert(i == 6); - } + { + static_assert(std::is_constructible, std::initializer_list&>::value, ""); + try { + optional opt(in_place, {3, 1}); + assert(false); + } catch (int i) { + assert(i == 6); } + } #endif return 0; diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/move.pass.cpp 
b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/move.pass.cpp index f59fc3b82ad7f..583debcaac650 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/move.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/move.pass.cpp @@ -12,70 +12,64 @@ // constexpr optional(optional&& rhs); +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" using std::optional; -template -void test(InitArgs&&... args) -{ - const optional orig(std::forward(args)...); - optional rhs(orig); - bool rhs_engaged = static_cast(rhs); - optional lhs = std::move(rhs); - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(*lhs == *orig); +template +void test(InitArgs&&... args) { + const optional orig(std::forward(args)...); + optional rhs(orig); + bool rhs_engaged = static_cast(rhs); + optional lhs = std::move(rhs); + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(*lhs == *orig); } -template -constexpr bool constexpr_test(InitArgs&&... args) -{ - static_assert( std::is_trivially_copy_constructible_v, ""); // requirement - const optional orig(std::forward(args)...); - optional rhs(orig); - optional lhs = std::move(rhs); - return (lhs.has_value() == orig.has_value()) && - (lhs.has_value() ? *lhs == *orig : true); +template +constexpr bool constexpr_test(InitArgs&&... args) { + static_assert(std::is_trivially_copy_constructible_v, ""); // requirement + const optional orig(std::forward(args)...); + optional rhs(orig); + optional lhs = std::move(rhs); + return (lhs.has_value() == orig.has_value()) && (lhs.has_value() ? 
*lhs == *orig : true); } void test_throwing_ctor() { #ifndef TEST_HAS_NO_EXCEPTIONS - struct Z { - Z() : count(0) {} - Z(Z&& o) : count(o.count + 1) - { if (count == 2) throw 6; } - int count; - }; - Z z; - optional rhs(std::move(z)); - try - { - optional lhs(std::move(rhs)); - assert(false); - } - catch (int i) - { - assert(i == 6); + struct Z { + Z() : count(0) {} + Z(Z&& o) : count(o.count + 1) { + if (count == 2) + throw 6; } + int count; + }; + Z z; + optional rhs(std::move(z)); + try { + optional lhs(std::move(rhs)); + assert(false); + } catch (int i) { + assert(i == 6); + } #endif } - -template -void test_ref(InitArgs&&... args) -{ - optional rhs(std::forward(args)...); - bool rhs_engaged = static_cast(rhs); - optional lhs = std::move(rhs); - assert(static_cast(lhs) == rhs_engaged); - if (rhs_engaged) - assert(&(*lhs) == &(*rhs)); +template +void test_ref(InitArgs&&... args) { + optional rhs(std::forward(args)...); + bool rhs_engaged = static_cast(rhs); + optional lhs = std::move(rhs); + assert(static_cast(lhs) == rhs_engaged); + if (rhs_engaged) + assert(&(*lhs) == &(*rhs)); } void test_reference_extension() { @@ -143,80 +137,79 @@ void test_reference_extension() { #endif } -int main(int, char**) -{ - test(); - test(3); - static_assert(constexpr_test(), "" ); - static_assert(constexpr_test(3), "" ); +int main(int, char**) { + test(); + test(3); + static_assert(constexpr_test(), ""); + static_assert(constexpr_test(3), ""); - { - optional o(42); - optional o2(std::move(o)); - assert(*o2 == 42); - } - { - using T = TestTypes::TestType; - T::reset(); - optional rhs; - assert(T::alive == 0); - const optional lhs(std::move(rhs)); - assert(lhs.has_value() == false); - assert(rhs.has_value() == false); - assert(T::alive == 0); - } - TestTypes::TestType::reset(); - { - using T = TestTypes::TestType; - T::reset(); - optional rhs(42); - assert(T::alive == 1); - assert(T::value_constructed == 1); - assert(T::move_constructed == 0); - const optional lhs(std::move(rhs)); 
- assert(lhs.has_value()); - assert(rhs.has_value()); - assert(lhs.value().value == 42); - assert(rhs.value().value == -1); - assert(T::move_constructed == 1); - assert(T::alive == 2); - } - TestTypes::TestType::reset(); - { - using namespace ConstexprTestTypes; - test(); - test(42); - } - { - using namespace TrivialTestTypes; - test(); - test(42); - } - { - test_throwing_ctor(); - } - { - struct ThrowsMove { - ThrowsMove() noexcept(false) {} - ThrowsMove(ThrowsMove const&) noexcept(false) {} - ThrowsMove(ThrowsMove &&) noexcept(false) {} - }; - static_assert(!std::is_nothrow_move_constructible>::value, ""); - struct NoThrowMove { - NoThrowMove() noexcept(false) {} - NoThrowMove(NoThrowMove const&) noexcept(false) {} - NoThrowMove(NoThrowMove &&) noexcept(true) {} - }; - static_assert(std::is_nothrow_move_constructible>::value, ""); - } - { - test_reference_extension(); - } - { + { + optional o(42); + optional o2(std::move(o)); + assert(*o2 == 42); + } + { + using T = TestTypes::TestType; + T::reset(); + optional rhs; + assert(T::alive == 0); + const optional lhs(std::move(rhs)); + assert(lhs.has_value() == false); + assert(rhs.has_value() == false); + assert(T::alive == 0); + } + TestTypes::TestType::reset(); + { + using T = TestTypes::TestType; + T::reset(); + optional rhs(42); + assert(T::alive == 1); + assert(T::value_constructed == 1); + assert(T::move_constructed == 0); + const optional lhs(std::move(rhs)); + assert(lhs.has_value()); + assert(rhs.has_value()); + assert(lhs.value().value == 42); + assert(rhs.value().value == -1); + assert(T::move_constructed == 1); + assert(T::alive == 2); + } + TestTypes::TestType::reset(); + { + using namespace ConstexprTestTypes; + test(); + test(42); + } + { + using namespace TrivialTestTypes; + test(); + test(42); + } + { + test_throwing_ctor(); + } + { + struct ThrowsMove { + ThrowsMove() noexcept(false) {} + ThrowsMove(ThrowsMove const&) noexcept(false) {} + ThrowsMove(ThrowsMove&&) noexcept(false) {} + }; + 
static_assert(!std::is_nothrow_move_constructible>::value, ""); + struct NoThrowMove { + NoThrowMove() noexcept(false) {} + NoThrowMove(NoThrowMove const&) noexcept(false) {} + NoThrowMove(NoThrowMove&&) noexcept(true) {} + }; + static_assert(std::is_nothrow_move_constructible>::value, ""); + } + { + test_reference_extension(); + } + { constexpr std::optional o1{4}; constexpr std::optional o2 = std::move(o1); - static_assert( *o2 == 4, "" ); - } + static_assert(*o2 == 4, ""); + } return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/nullopt_t.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/nullopt_t.pass.cpp index 36a60f29da854..c1bdd81e5ed47 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/nullopt_t.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/nullopt_t.pass.cpp @@ -11,66 +11,57 @@ // constexpr optional(nullopt_t) noexcept; +#include #include #include -#include #include "archetypes.h" #include "test_macros.h" -using std::optional; -using std::nullopt_t; using std::nullopt; +using std::nullopt_t; +using std::optional; template -void -test_constexpr() -{ - static_assert(std::is_nothrow_constructible::value, ""); - static_assert(std::is_trivially_destructible::value, ""); - static_assert(std::is_trivially_destructible::value, ""); +void test_constexpr() { + static_assert(std::is_nothrow_constructible::value, ""); + static_assert(std::is_trivially_destructible::value, ""); + static_assert(std::is_trivially_destructible::value, ""); - constexpr Opt opt(nullopt); - static_assert(static_cast(opt) == false, ""); + constexpr Opt opt(nullopt); + static_assert(static_cast(opt) == false, ""); - struct test_constexpr_ctor - : public Opt - { - constexpr test_constexpr_ctor() {} - }; + struct test_constexpr_ctor : public Opt { + constexpr test_constexpr_ctor() {} + }; } template -void -test() -{ - 
static_assert(std::is_nothrow_constructible::value, ""); - static_assert(!std::is_trivially_destructible::value, ""); - static_assert(!std::is_trivially_destructible::value, ""); - { +void test() { + static_assert(std::is_nothrow_constructible::value, ""); + static_assert(!std::is_trivially_destructible::value, ""); + static_assert(!std::is_trivially_destructible::value, ""); + { Opt opt(nullopt); assert(static_cast(opt) == false); - } - { + } + { const Opt opt(nullopt); assert(static_cast(opt) == false); - } - struct test_constexpr_ctor - : public Opt - { - constexpr test_constexpr_ctor() {} - }; + } + struct test_constexpr_ctor : public Opt { + constexpr test_constexpr_ctor() {} + }; } -int main(int, char**) -{ - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test_constexpr>(); - test>(); +int main(int, char**) { + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test_constexpr>(); + test>(); return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/optional_U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/optional_U.pass.cpp index 14c400cdd1526..709b106c800a6 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/optional_U.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/optional_U.pass.cpp @@ -23,75 +23,68 @@ using std::optional; template -TEST_CONSTEXPR_CXX20 void -test(optional&& rhs, bool is_going_to_throw = false) -{ - bool rhs_engaged = static_cast(rhs); +TEST_CONSTEXPR_CXX20 void test(optional&& rhs, bool is_going_to_throw = false) { + bool rhs_engaged = static_cast(rhs); #ifndef TEST_HAS_NO_EXCEPTIONS - try - { - optional lhs = std::move(rhs); - assert(is_going_to_throw == false); - assert(static_cast(lhs) == rhs_engaged); - } - catch (int i) - { - assert(i == 6); - } -#else - if (is_going_to_throw) return; + try { optional lhs = 
std::move(rhs); + assert(is_going_to_throw == false); assert(static_cast(lhs) == rhs_engaged); + } catch (int i) { + assert(i == 6); + } +#else + if (is_going_to_throw) + return; + optional lhs = std::move(rhs); + assert(static_cast(lhs) == rhs_engaged); #endif } -class X -{ - int i_; +class X { + int i_; + public: - TEST_CONSTEXPR_CXX20 X(int i) : i_(i) {} - TEST_CONSTEXPR_CXX20 X(X&& x) : i_(std::exchange(x.i_, 0)) {} - TEST_CONSTEXPR_CXX20 ~X() {i_ = 0;} - friend constexpr bool operator==(const X& x, const X& y) {return x.i_ == y.i_;} + TEST_CONSTEXPR_CXX20 X(int i) : i_(i) {} + TEST_CONSTEXPR_CXX20 X(X&& x) : i_(std::exchange(x.i_, 0)) {} + TEST_CONSTEXPR_CXX20 ~X() { i_ = 0; } + friend constexpr bool operator==(const X& x, const X& y) { return x.i_ == y.i_; } }; -struct Z -{ - Z(int) { TEST_THROW(6); } +struct Z { + Z(int) { TEST_THROW(6); } }; -template -TEST_CONSTEXPR_CXX20 bool test_all() -{ - { - optional rhs; - test(std::move(rhs)); - } - { - optional rhs(short{3}); - test(std::move(rhs)); - } - return true; +template +TEST_CONSTEXPR_CXX20 bool test_all() { + { + optional rhs; + test(std::move(rhs)); + } + { + optional rhs(short{3}); + test(std::move(rhs)); + } + return true; } -int main(int, char**) -{ - test_all(); - test_all(); +int main(int, char**) { + test_all(); + test_all(); #if TEST_STD_VER > 17 - static_assert(test_all()); - static_assert(test_all()); + static_assert(test_all()); + static_assert(test_all()); #endif - { - optional rhs; - test(std::move(rhs)); - } - { - optional rhs(3); - test(std::move(rhs), true); - } + { + optional rhs; + test(std::move(rhs)); + } + { + optional rhs(3); + test(std::move(rhs), true); + } - static_assert(!(std::is_constructible, optional>::value), ""); + static_assert(!(std::is_constructible, optional>::value), ""); return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/rvalue_T.pass.cpp 
b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/rvalue_T.pass.cpp index 12425955f5a86..e73eef4592256 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/rvalue_T.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/rvalue_T.pass.cpp @@ -12,137 +12,118 @@ // constexpr optional(T&& v); +#include #include #include -#include #include "test_macros.h" #include "archetypes.h" - using std::optional; - -class Z -{ +class Z { public: - Z(int) {} - Z(Z&&) {TEST_THROW(6);} + Z(int) {} + Z(Z&&) { TEST_THROW(6); } }; - -int main(int, char**) -{ - { - typedef int T; - constexpr optional opt(T(5)); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == 5, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(T&&) {} - }; - } - { - typedef double T; - constexpr optional opt(T(3)); - static_assert(static_cast(opt) == true, ""); - static_assert(*opt == 3, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(T&&) {} - }; - } - { - const int x = 42; - optional o(std::move(x)); - assert(*o == 42); - } - { - typedef TestTypes::TestType T; - T::reset(); - optional opt = T{3}; - assert(T::alive == 1); - assert(T::move_constructed == 1); - assert(static_cast(opt) == true); - assert(opt.value().value == 3); - } - { - typedef ExplicitTestTypes::TestType T; - static_assert(!std::is_convertible>::value, ""); - T::reset(); - optional opt(T{3}); - assert(T::alive == 1); - assert(T::move_constructed == 1); - assert(static_cast(opt) == true); - assert(opt.value().value == 3); - } - { - typedef TestTypes::TestType T; - T::reset(); - optional opt = {3}; - assert(T::alive == 1); - assert(T::value_constructed == 1); - assert(T::copy_constructed == 0); - assert(T::move_constructed == 0); - assert(static_cast(opt) == true); - assert(opt.value().value == 3); - } - { - typedef ConstexprTestTypes::TestType T; - constexpr 
optional opt = {T(3)}; - static_assert(static_cast(opt) == true, ""); - static_assert(opt.value().value == 3, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; - } - { - typedef ConstexprTestTypes::TestType T; - constexpr optional opt = {3}; - static_assert(static_cast(opt) == true, ""); - static_assert(opt.value().value == 3, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(const T&) {} - }; - } - { - typedef ExplicitConstexprTestTypes::TestType T; - static_assert(!std::is_convertible>::value, ""); - constexpr optional opt(T{3}); - static_assert(static_cast(opt) == true, ""); - static_assert(opt.value().value == 3, ""); - - struct test_constexpr_ctor - : public optional - { - constexpr test_constexpr_ctor(T&&) {} - }; - - } +int main(int, char**) { + { + typedef int T; + constexpr optional opt(T(5)); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == 5, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(T&&) {} + }; + } + { + typedef double T; + constexpr optional opt(T(3)); + static_assert(static_cast(opt) == true, ""); + static_assert(*opt == 3, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(T&&) {} + }; + } + { + const int x = 42; + optional o(std::move(x)); + assert(*o == 42); + } + { + typedef TestTypes::TestType T; + T::reset(); + optional opt = T{3}; + assert(T::alive == 1); + assert(T::move_constructed == 1); + assert(static_cast(opt) == true); + assert(opt.value().value == 3); + } + { + typedef ExplicitTestTypes::TestType T; + static_assert(!std::is_convertible>::value, ""); + T::reset(); + optional opt(T{3}); + assert(T::alive == 1); + assert(T::move_constructed == 1); + assert(static_cast(opt) == true); + assert(opt.value().value == 3); + } + { + typedef TestTypes::TestType T; + T::reset(); + optional opt = {3}; + assert(T::alive == 1); + 
assert(T::value_constructed == 1); + assert(T::copy_constructed == 0); + assert(T::move_constructed == 0); + assert(static_cast(opt) == true); + assert(opt.value().value == 3); + } + { + typedef ConstexprTestTypes::TestType T; + constexpr optional opt = {T(3)}; + static_assert(static_cast(opt) == true, ""); + static_assert(opt.value().value == 3, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } + { + typedef ConstexprTestTypes::TestType T; + constexpr optional opt = {3}; + static_assert(static_cast(opt) == true, ""); + static_assert(opt.value().value == 3, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(const T&) {} + }; + } + { + typedef ExplicitConstexprTestTypes::TestType T; + static_assert(!std::is_convertible>::value, ""); + constexpr optional opt(T{3}); + static_assert(static_cast(opt) == true, ""); + static_assert(opt.value().value == 3, ""); + + struct test_constexpr_ctor : public optional { + constexpr test_constexpr_ctor(T&&) {} + }; + } #ifndef TEST_HAS_NO_EXCEPTIONS - { - try - { - Z z(3); - optional opt(std::move(z)); - assert(false); - } - catch (int i) - { - assert(i == 6); - } + { + try { + Z z(3); + optional opt(std::move(z)); + assert(false); + } catch (int i) { + assert(i == 6); } + } #endif return 0; diff --git a/libcxx/test/support/platform_support.h b/libcxx/test/support/platform_support.h index 99e60f60c5998..b66fdff9b0491 100644 --- a/libcxx/test/support/platform_support.h +++ b/libcxx/test/support/platform_support.h @@ -48,7 +48,7 @@ # include // strverscmp #endif -#if defined(_NEWLIB_VERSION) && defined(__STRICT_ANSI__) +#if _LIBCPP_LIBC_NEWLIB && defined(__STRICT_ANSI__) // Newlib provides this, but in the header it's under __STRICT_ANSI__ extern "C" { int mkstemp(char*); diff --git a/libcxx/utils/ci/run-buildbot b/libcxx/utils/ci/run-buildbot index 7442361627104..8ab6a94e0255f 100755 --- a/libcxx/utils/ci/run-buildbot +++ 
b/libcxx/utils/ci/run-buildbot @@ -205,6 +205,7 @@ function test-armv7m-picolibc() { -DLIBUNWIND_TEST_CONFIG="armv7m-picolibc-libunwind.cfg.in" \ -DCMAKE_C_FLAGS="${flags}" \ -DCMAKE_CXX_FLAGS="${flags}" \ + -DRUNTIMES_USE_LIBC=picolibc \ "${@}" step "Installing compiler-rt" diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp index 5ed89e47c672e..7ec75b0d61fce 100644 --- a/lld/ELF/Arch/RISCV.cpp +++ b/lld/ELF/Arch/RISCV.cpp @@ -8,6 +8,7 @@ #include "InputFiles.h" #include "OutputSections.h" +#include "RISCVInternalRelocations.h" #include "RelocScan.h" #include "Symbols.h" #include "SyntheticSections.h" @@ -345,8 +346,15 @@ RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s, case R_RISCV_SUB_ULEB128: return RE_RISCV_LEB128; default: - Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v - << ") against symbol " << &s; + if (type.v & INTERNAL_RISCV_VENDOR_MASK) { + Err(ctx) << getErrorLoc(ctx, loc) + << "unsupported vendor-specific relocation " << type + << " against symbol " << &s; + return R_NONE; + } + Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" + << (type.v & ~INTERNAL_RISCV_VENDOR_MASK) << ") against symbol " + << &s; return R_NONE; } } @@ -859,7 +867,7 @@ static bool relax(Ctx &ctx, int pass, InputSection &sec) { std::fill_n(aux.relocTypes.get(), relocs.size(), R_RISCV_NONE); aux.writes.clear(); - for (auto [i, r] : llvm::enumerate(relocs)) { + for (auto [i, r] : llvm::enumerate(riscv_vendor_relocs(relocs))) { const uint64_t loc = secAddr + r.offset - delta; uint32_t &cur = aux.relocDeltas[i], remove = 0; switch (r.type) { @@ -1503,12 +1511,19 @@ void RISCV::scanSectionImpl(InputSectionBase &sec, Relocs rels) { rvVendor = sym.getName(); continue; } else if (!rvVendor.empty()) { - Err(ctx) << getErrorLoc(ctx, loc) - << "unknown vendor-specific relocation (" << type.v - << ") in namespace '" << rvVendor << "' against symbol '" << &sym - << "'"; + uint32_t VendorFlag = getRISCVVendorRelMarker(rvVendor); + if 
(!VendorFlag) { + Err(ctx) << getErrorLoc(ctx, loc) + << "unknown vendor-specific relocation (" << type.v + << ") in namespace '" << rvVendor << "' against symbol '" + << &sym << "'"; + rvVendor = ""; + continue; + } + rvVendor = ""; - continue; + assert((type.v < 256) && "Out of range relocation detected!"); + type.v |= VendorFlag; } rs.scan(it, type, rs.getAddend(*it, type)); @@ -1533,3 +1548,21 @@ template void RISCV::scanSection1(InputSectionBase &sec) { void RISCV::scanSection(InputSectionBase &sec) { invokeELFT(scanSection1, sec); } + +namespace lld::elf { +uint32_t getRISCVVendorRelMarker(StringRef rvVendor) { + return StringSwitch(rvVendor) + .Case("QUALCOMM", INTERNAL_RISCV_VENDOR_QUALCOMM) + .Case("ANDES", INTERNAL_RISCV_VENDOR_ANDES) + .Default(0); +} + +std::optional getRISCVVendorString(RelType ty) { + if ((ty.v & INTERNAL_RISCV_VENDOR_MASK) == INTERNAL_RISCV_VENDOR_QUALCOMM) + return "QUALCOMM"; + if ((ty.v & INTERNAL_RISCV_VENDOR_MASK) == INTERNAL_RISCV_VENDOR_ANDES) + return "ANDES"; + return std::nullopt; +} + +} // namespace lld::elf diff --git a/lld/ELF/Arch/RISCVInternalRelocations.h b/lld/ELF/Arch/RISCVInternalRelocations.h new file mode 100644 index 0000000000000..35e2f53b03b35 --- /dev/null +++ b/lld/ELF/Arch/RISCVInternalRelocations.h @@ -0,0 +1,113 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLD_ELF_ARCH_RISCVINTERNALRELOCATIONS_H +#define LLD_ELF_ARCH_RISCVINTERNALRELOCATIONS_H + +#include "Relocations.h" +#include "Symbols.h" + +namespace lld::elf { + +// Bit 8 of RelType is used to indicate linker-internal relocations that are +// not vendor-specific. 
+// These are internal relocation numbers for GP/X0 relaxation. They aren't part +// of the psABI spec. +constexpr uint32_t INTERNAL_R_RISCV_GPREL_I = 256; +constexpr uint32_t INTERNAL_R_RISCV_GPREL_S = 257; +constexpr uint32_t INTERNAL_R_RISCV_X0REL_I = 258; +constexpr uint32_t INTERNAL_R_RISCV_X0REL_S = 259; + +// Bits 9 -> 31 of RelType are used to indicate vendor-specific relocations. +constexpr uint32_t INTERNAL_RISCV_VENDOR_MASK = 0xFFFFFFFF << 9; +constexpr uint32_t INTERNAL_RISCV_VENDOR_QUALCOMM = 1 << 9; +constexpr uint32_t INTERNAL_RISCV_VENDOR_ANDES = 2 << 9; + +constexpr uint32_t INTERNAL_RISCV_QC_ABS20_U = + INTERNAL_RISCV_VENDOR_QUALCOMM | llvm::ELF::R_RISCV_QC_ABS20_U; +constexpr uint32_t INTERNAL_RISCV_QC_E_BRANCH = + INTERNAL_RISCV_VENDOR_QUALCOMM | llvm::ELF::R_RISCV_QC_E_BRANCH; +constexpr uint32_t INTERNAL_RISCV_QC_E_32 = + INTERNAL_RISCV_VENDOR_QUALCOMM | llvm::ELF::R_RISCV_QC_E_32; +constexpr uint32_t INTERNAL_RISCV_QC_E_CALL_PLT = + INTERNAL_RISCV_VENDOR_QUALCOMM | llvm::ELF::R_RISCV_QC_E_CALL_PLT; + +constexpr uint32_t INTERNAL_RISCV_NDS_BRANCH_10 = + INTERNAL_RISCV_VENDOR_ANDES | llvm::ELF::R_RISCV_NDS_BRANCH_10; + +uint32_t getRISCVVendorRelMarker(llvm::StringRef rvVendor); +std::optional getRISCVVendorString(RelType ty); + +class vendor_reloc_iterator { +public: + using iterator_category = std::forward_iterator_tag; + using value_type = Relocation; + using difference_type = std::ptrdiff_t; + using pointer = Relocation *; + using reference = Relocation; // returned by value + + vendor_reloc_iterator(MutableArrayRef::iterator i, + MutableArrayRef::iterator e) + : it(i), end(e) {} + + // Dereference + Relocation operator*() const { + Relocation r = *it; + r.type.v |= rvVendorFlag; + return r; + } + + struct vendor_reloc_proxy { + Relocation r; + const Relocation *operator->() const { return &r; } + }; + + vendor_reloc_proxy operator->() const { + return vendor_reloc_proxy{this->operator*()}; + } + + vendor_reloc_iterator &operator++() { + 
++it; + if (it != end && it->type == llvm::ELF::R_RISCV_VENDOR) { + rvVendorFlag = getRISCVVendorRelMarker(it->sym->getName()); + ++it; + } else { + rvVendorFlag = 0; + } + return *this; + } + + vendor_reloc_iterator operator++(int) { + vendor_reloc_iterator tmp(*this); + ++(*this); + return tmp; + } + + bool operator==(const vendor_reloc_iterator &other) const { + return it == other.it; + } + bool operator!=(const vendor_reloc_iterator &other) const { + return it != other.it; + } + + Relocation *getUnderlyingRelocation() const { return &*it; } + +private: + MutableArrayRef::iterator it; + MutableArrayRef::iterator end; + uint32_t rvVendorFlag = 0; +}; + +inline auto riscv_vendor_relocs(MutableArrayRef arr) { + return llvm::make_range(vendor_reloc_iterator(arr.begin(), arr.end()), + vendor_reloc_iterator(arr.end(), arr.end())); +} + +} // namespace lld::elf + +#endif diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp index 89e4dbeed3109..3fc3e3f16e9e0 100644 --- a/lld/ELF/Target.cpp +++ b/lld/ELF/Target.cpp @@ -24,6 +24,7 @@ //===----------------------------------------------------------------------===// #include "Target.h" +#include "Arch/RISCVInternalRelocations.h" #include "InputFiles.h" #include "OutputSections.h" #include "RelocScan.h" @@ -40,6 +41,14 @@ using namespace lld::elf; std::string elf::toStr(Ctx &ctx, RelType type) { StringRef s = getELFRelocationTypeName(ctx.arg.emachine, type); + if (ctx.arg.emachine == EM_RISCV && s == "Unknown") { + auto VendorString = getRISCVVendorString(type); + if (VendorString) + s = getRISCVVendorRelocationTypeName(type & ~INTERNAL_RISCV_VENDOR_MASK, + *VendorString); + if (s == "Unknown") + return ("Unknown vendor-specific (" + Twine(type) + ")").str(); + } if (s == "Unknown") return ("Unknown (" + Twine(type) + ")").str(); return std::string(s); diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp index 32b20993af67c..28c817c54c85d 100644 --- a/lld/MachO/Driver.cpp +++ b/lld/MachO/Driver.cpp @@ -41,6 +41,7 @@ 
#include "llvm/Object/Archive.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Parallel.h" #include "llvm/Support/Path.h" @@ -53,6 +54,10 @@ #include "llvm/TextAPI/Architecture.h" #include "llvm/TextAPI/PackedVersion.h" +#if !_WIN32 +#include +#endif + using namespace llvm; using namespace llvm::MachO; using namespace llvm::object; @@ -292,12 +297,13 @@ struct DeferredFile { using DeferredFiles = std::vector; #if LLVM_ENABLE_THREADS -class SerialBackgroundQueue { +class SerialBackgroundWorkQueue { std::deque> queue; std::thread *running; std::mutex mutex; public: + std::atomic_bool stopAllWork = false; void queueWork(std::function work) { mutex.lock(); if (running && queue.empty()) { @@ -312,7 +318,7 @@ class SerialBackgroundQueue { queue.emplace_back(std::move(work)); if (!running) running = new std::thread([&]() { - while (true) { + while (!stopAllWork) { mutex.lock(); if (queue.empty()) { mutex.unlock(); @@ -331,6 +337,8 @@ class SerialBackgroundQueue { } }; +static SerialBackgroundWorkQueue pageInQueue; + // Most input files have been mapped but not yet paged in. // This code forces the page-ins on multiple threads so // the process is not stalled waiting on disk buffer i/o. 
@@ -339,8 +347,8 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { static const size_t largeArchive = 10 * 1024 * 1024; #ifndef NDEBUG using namespace std::chrono; - std::atomic_int numDeferedFilesTouched = 0; static std::atomic_uint64_t totalBytes = 0; + std::atomic_int numDeferedFilesAdvised = 0; auto t0 = high_resolution_clock::now(); #endif @@ -348,24 +356,34 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { const StringRef &buff = deferredFile.buffer.getBuffer(); if (buff.size() > largeArchive) return; + #ifndef NDEBUG totalBytes += buff.size(); - numDeferedFilesTouched += 1; + numDeferedFilesAdvised += 1; #endif - +#if _WIN32 // Reference all file's mmap'd pages to load them into memory. - for (const char *page = buff.data(), *end = page + buff.size(); page < end; - page += pageSize) { + for (const char *page = buff.data(), *end = page + buff.size(); + page < end && !pageInQueue.stopAllWork; page += pageSize) { [[maybe_unused]] volatile char t = *page; (void)t; } +#else +#define DEBUG_TYPE "lld-madvise" + auto aligned = + llvm::alignDown(reinterpret_cast(buff.data()), pageSize); + if (madvise((void *)aligned, buff.size(), MADV_WILLNEED) < 0) + LLVM_DEBUG(llvm::dbgs() << "madvise error: " << strerror(errno) << "\n"); +#undef DEBUG_TYPE +#endif }; + { // Create scope for waiting for the taskGroup std::atomic_size_t index = 0; llvm::parallel::TaskGroup taskGroup; for (int w = 0; w < config->readWorkers; w++) taskGroup.spawn([&index, &preloadDeferredFile, &deferred]() { - while (true) { + while (!pageInQueue.stopAllWork) { size_t localIndex = index.fetch_add(1); if (localIndex >= deferred.size()) break; @@ -373,17 +391,17 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { } }); } + #ifndef NDEBUG auto dt = high_resolution_clock::now() - t0; if (Process::GetEnv("LLD_MULTI_THREAD_PAGE")) llvm::dbgs() << "multiThreadedPageIn " << totalBytes << "/" - << numDeferedFilesTouched << "/" << deferred.size() << "/" + << 
numDeferedFilesAdvised << "/" << deferred.size() << "/" << duration_cast(dt).count() / 1000. << "\n"; #endif } static void multiThreadedPageIn(const DeferredFiles &deferred) { - static SerialBackgroundQueue pageInQueue; pageInQueue.queueWork([=]() { DeferredFiles files = deferred; multiThreadedPageInBackground(files); @@ -489,7 +507,7 @@ static InputFile *processFile(std::optional buffer, continue; } - if (archiveContents) + if (config->readWorkers && archiveContents) archiveContents->push_back({path, isLazy, *mb}); if (!hasObjCSection(*mb)) continue; @@ -1447,6 +1465,8 @@ static void createFiles(const InputArgList &args) { multiThreadedPageIn(archiveContents); for (auto *archive : archives) archive->addLazySymbols(); + + pageInQueue.stopAllWork = true; } #endif } @@ -1845,8 +1865,8 @@ bool link(ArrayRef argsArr, llvm::raw_ostream &stdoutOS, "'"); config->readWorkers = workers; #else - error(arg->getSpelling() + - ": option unavailable because lld was not built with thread support"); + warn(arg->getSpelling() + + ": option unavailable because lld was not built with thread support"); #endif } if (auto *arg = args.getLastArg(OPT_threads_eq)) { diff --git a/lld/MachO/InputFiles.cpp b/lld/MachO/InputFiles.cpp index efcffc9c53383..81caef5f15ae1 100644 --- a/lld/MachO/InputFiles.cpp +++ b/lld/MachO/InputFiles.cpp @@ -217,7 +217,8 @@ std::optional macho::readFile(StringRef path) { if (entry != cachedReads.end()) return entry->second; - ErrorOr> mbOrErr = MemoryBuffer::getFile(path); + ErrorOr> mbOrErr = + MemoryBuffer::getFile(path, false, /*RequiresNullTerminator=*/false); if (std::error_code ec = mbOrErr.getError()) { error("cannot open " + path + ": " + ec.message()); return std::nullopt; diff --git a/lld/test/ELF/riscv-vendor-relocations.s b/lld/test/ELF/riscv-vendor-relocations.s index b0f3c4a30d060..f121adec95cd0 100644 --- a/lld/test/ELF/riscv-vendor-relocations.s +++ b/lld/test/ELF/riscv-vendor-relocations.s @@ -8,12 +8,19 @@ TARGET: nop -.global INVALID_VENDOR 
+.local INVALID_VENDOR +.local QUALCOMM +.local ANDES .reloc 1f, R_RISCV_VENDOR, INVALID_VENDOR+0 .reloc 1f, R_RISCV_VENDOR, INVALID_VENDOR+0 .reloc 1f, R_RISCV_CUSTOM255, TARGET -1: - nop - # CHECK: error: {{.*}}:(.text+0x4): malformed consecutive R_RISCV_VENDOR relocations # CHECK: error: {{.*}}:(.text+0x4): unknown vendor-specific relocation (255) in namespace 'INVALID_VENDOR' against symbol 'TARGET' +.reloc 1f, R_RISCV_VENDOR, QUALCOMM+0 +.reloc 1f, R_RISCV_CUSTOM192, TARGET +# CHECK: error: {{.*}}:(.text+0x4): unsupported vendor-specific relocation R_RISCV_QC_ABS20_U against symbol TARGET +.reloc 1f, R_RISCV_VENDOR, ANDES+0 +.reloc 1f, R_RISCV_CUSTOM241, TARGET +# CHECK: error: {{.*}}:(.text+0x4): unsupported vendor-specific relocation R_RISCV_NDS_BRANCH_10 against symbol TARGET +1: + nop diff --git a/lld/test/MachO/read-workers.s b/lld/test/MachO/read-workers.s index 294106ba0b084..4d2f88c2a757c 100644 --- a/lld/test/MachO/read-workers.s +++ b/lld/test/MachO/read-workers.s @@ -1,7 +1,4 @@ # REQUIRES: x86 && thread_support -## Sometimes fails, particularly in an ASAN build, do not run until -## https://github.com/llvm/llvm-project/pull/157917 addresses the cause. -# UNSUPPORTED: target={{.*}} # RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o ## A non-negative integer is allowed. 
diff --git a/lld/test/wasm/wrap_import.s b/lld/test/wasm/wrap_import.s new file mode 100644 index 0000000000000..ce3b6f57f10c4 --- /dev/null +++ b/lld/test/wasm/wrap_import.s @@ -0,0 +1,32 @@ +# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown %s -o %t.o +# RUN: wasm-ld -wrap nosuchsym -wrap foo -allow-undefined -o %t.wasm %t.o +# RUN: obj2yaml %t.wasm | FileCheck %s + +.globl foo +.globl _start + +foo: + .functype foo () -> () + end_function + +_start: + .functype _start () -> () + call foo + end_function + +# CHECK: - Type: IMPORT +# CHECK-NEXT: Imports: +# CHECK-NEXT: - Module: env +# CHECK-NEXT: Field: __wrap_foo +# CHECK-NEXT: Kind: FUNCTION +# CHECK-NEXT SigIndex: 0 + +# CHECK: - Type: CODE +# CHECK-NEXT: Functions: +# CHECK-NEXT: Index: 1 + +# CHECK: FunctionNames: +# CHECK-NEXT: - Index: 0 +# CHECK-NEXT: Name: __wrap_foo +# CHECK-NEXT: - Index: 1 +# CHECK-NEXT: Name: _start diff --git a/lld/wasm/Driver.cpp b/lld/wasm/Driver.cpp index fac166587cb9b..97e50783985a8 100644 --- a/lld/wasm/Driver.cpp +++ b/lld/wasm/Driver.cpp @@ -1173,9 +1173,10 @@ struct WrappedSymbol { Symbol *wrap; }; -static Symbol *addUndefined(StringRef name) { +static Symbol *addUndefined(StringRef name, + const WasmSignature *signature = nullptr) { return symtab->addUndefinedFunction(name, std::nullopt, std::nullopt, - WASM_SYMBOL_UNDEFINED, nullptr, nullptr, + WASM_SYMBOL_UNDEFINED, nullptr, signature, false); } @@ -1198,7 +1199,8 @@ static std::vector addWrappedSymbols(opt::InputArgList &args) { continue; Symbol *real = addUndefined(saver().save("__real_" + name)); - Symbol *wrap = addUndefined(saver().save("__wrap_" + name)); + Symbol *wrap = + addUndefined(saver().save("__wrap_" + name), sym->getSignature()); v.push_back({sym, real, wrap}); // We want to tell LTO not to inline symbols to be overwritten diff --git a/lld/wasm/SyntheticSections.cpp b/lld/wasm/SyntheticSections.cpp index 399a5084e6595..5e7b9c229f3ed 100644 --- a/lld/wasm/SyntheticSections.cpp +++ 
b/lld/wasm/SyntheticSections.cpp @@ -196,7 +196,9 @@ void ImportSection::addImport(Symbol *sym) { StringRef module = sym->importModule.value_or(defaultModule); StringRef name = sym->importName.value_or(sym->getName()); if (auto *f = dyn_cast(sym)) { - ImportKey key(*(f->getSignature()), module, name); + const WasmSignature *sig = f->getSignature(); + assert(sig && "imported functions must have a signature"); + ImportKey key(*sig, module, name); auto entry = importedFunctions.try_emplace(key, numImportedFunctions); if (entry.second) { importedSymbols.emplace_back(sym); diff --git a/lldb/bindings/interface/SBTargetExtensions.i b/lldb/bindings/interface/SBTargetExtensions.i index 43125d8970615..ef1093b03ced9 100644 --- a/lldb/bindings/interface/SBTargetExtensions.i +++ b/lldb/bindings/interface/SBTargetExtensions.i @@ -190,6 +190,7 @@ STRING_EXTENSION_LEVEL_OUTSIDE(SBTarget, lldb::eDescriptionLevelBrief) byte_order = property(GetByteOrder, None, doc='''A read only property that returns an lldb enumeration value (lldb.eByteOrderLittle, lldb.eByteOrderBig, lldb.eByteOrderInvalid) that represents the byte order for this target.''') addr_size = property(GetAddressByteSize, None, doc='''A read only property that returns the size in bytes of an address for this target.''') triple = property(GetTriple, None, doc='''A read only property that returns the target triple (arch-vendor-os) for this target as a string.''') + arch_name = property(GetArchName, None, doc='''A read only property that returns the architecture name for this target as a string.''') data_byte_size = property(GetDataByteSize, None, doc='''A read only property that returns the size in host bytes of a byte in the data address space for this target.''') code_byte_size = property(GetCodeByteSize, None, doc='''A read only property that returns the size in host bytes of a byte in the code address space for this target.''') platform = property(GetPlatform, None, doc='''A read only property that returns the platform 
associated with with this target.''') diff --git a/lldb/examples/python/templates/scripted_process.py b/lldb/examples/python/templates/scripted_process.py index 49059d533f38a..b4232f632a30a 100644 --- a/lldb/examples/python/templates/scripted_process.py +++ b/lldb/examples/python/templates/scripted_process.py @@ -35,9 +35,7 @@ def __init__(self, exe_ctx, args): target = exe_ctx.target if isinstance(target, lldb.SBTarget) and target.IsValid(): self.target = target - triple = self.target.triple - if triple: - self.arch = triple.split("-")[0] + self.arch = target.arch_name self.dbg = target.GetDebugger() if isinstance(args, lldb.SBStructuredData) and args.IsValid(): self.args = args diff --git a/lldb/include/lldb/API/SBTarget.h b/lldb/include/lldb/API/SBTarget.h index 379a0bb7e9513..ce81ae46a0905 100644 --- a/lldb/include/lldb/API/SBTarget.h +++ b/lldb/include/lldb/API/SBTarget.h @@ -44,6 +44,7 @@ class LLDB_API SBTarget { eBroadcastBitWatchpointChanged = (1 << 3), eBroadcastBitSymbolsLoaded = (1 << 4), eBroadcastBitSymbolsChanged = (1 << 5), + eBroadcastBitNewTargetCreated = (1 << 6), }; // Constructors @@ -64,6 +65,10 @@ class LLDB_API SBTarget { static lldb::SBTarget GetTargetFromEvent(const lldb::SBEvent &event); + /// For eBroadcastBitNewTargetCreated events, returns the newly created + /// target. For other event types, returns an invalid SBTarget. + static lldb::SBTarget GetCreatedTargetFromEvent(const lldb::SBEvent &event); + static uint32_t GetNumModulesFromEvent(const lldb::SBEvent &event); static lldb::SBModule GetModuleAtIndexFromEvent(const uint32_t idx, @@ -353,6 +358,8 @@ class LLDB_API SBTarget { const char *GetTriple(); + const char *GetArchName(); + const char *GetABIName(); const char *GetLabel() const; @@ -365,6 +372,16 @@ class LLDB_API SBTarget { /// LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID if the target is invalid. lldb::user_id_t GetGloballyUniqueID() const; + /// Get the target session name for this target. 
+ /// + /// The target session name provides a meaningful name for IDEs or tools to + /// display to help the user identify the origin and purpose of the target. + /// + /// \return + /// The target session name for this target, or nullptr if the target is + /// invalid or has no target session name. + const char *GetTargetSessionName() const; + SBError SetLabel(const char *label); /// Architecture opcode byte size width accessor diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h index c1f9785e76f90..8e6c16cbfe0fc 100644 --- a/lldb/include/lldb/Target/Process.h +++ b/lldb/include/lldb/Target/Process.h @@ -2534,6 +2534,28 @@ void PruneThreadPlans(); void CalculateExecutionContext(ExecutionContext &exe_ctx) override; + /// Associates a file descriptor with the process' STDIO handling + /// and configures an asynchronous reading of that descriptor. + /// + /// This method installs a ConnectionFileDescriptor for the passed file + /// descriptor and starts a dedicated read thread. If the read thread starts + /// successfully, the method also ensures that an IOHandlerProcessSTDIO is + /// created to manage user input to the process. + /// + /// The descriptor's ownership is transferred to the underlying + /// ConnectionFileDescriptor. + /// + /// When data is successfully read from the file descriptor, it is stored in + /// m_stdout_data. There is no differentiation between stdout and stderr. + /// + /// \param[in] fd + /// The file descriptor to use for process STDIO communication. It's + /// assumed to be valid and will be managed by the newly created + /// connection. + /// + /// \see lldb_private::Process::STDIOReadThreadBytesReceived() + /// \see lldb_private::IOHandlerProcessSTDIO + /// \see lldb_private::ConnectionFileDescriptor void SetSTDIOFileDescriptor(int file_descriptor); // Add a permanent region of memory that should never be read or written to. 
diff --git a/lldb/include/lldb/Target/Target.h b/lldb/include/lldb/Target/Target.h index 908094bfd888d..c0fcda7c0d960 100644 --- a/lldb/include/lldb/Target/Target.h +++ b/lldb/include/lldb/Target/Target.h @@ -537,6 +537,7 @@ class Target : public std::enable_shared_from_this, eBroadcastBitWatchpointChanged = (1 << 3), eBroadcastBitSymbolsLoaded = (1 << 4), eBroadcastBitSymbolsChanged = (1 << 5), + eBroadcastBitNewTargetCreated = (1 << 6), }; // These two functions fill out the Broadcaster interface: @@ -556,6 +557,13 @@ class Target : public std::enable_shared_from_this, TargetEventData(const lldb::TargetSP &target_sp, const ModuleList &module_list); + // Constructor for eBroadcastBitNewTargetCreated events. For this event + // type: + // - target_sp is the parent target (the subject/broadcaster of the event) + // - created_target_sp is the newly created target + TargetEventData(const lldb::TargetSP &target_sp, + const lldb::TargetSP &created_target_sp); + ~TargetEventData() override; static llvm::StringRef GetFlavorString(); @@ -570,14 +578,23 @@ class Target : public std::enable_shared_from_this, static lldb::TargetSP GetTargetFromEvent(const Event *event_ptr); + // For eBroadcastBitNewTargetCreated events, returns the newly created + // target. For other event types, returns an invalid target. + static lldb::TargetSP GetCreatedTargetFromEvent(const Event *event_ptr); + static ModuleList GetModuleListFromEvent(const Event *event_ptr); const lldb::TargetSP &GetTarget() const { return m_target_sp; } + const lldb::TargetSP &GetCreatedTarget() const { + return m_created_target_sp; + } + const ModuleList &GetModuleList() const { return m_module_list; } private: lldb::TargetSP m_target_sp; + lldb::TargetSP m_created_target_sp; ModuleList m_module_list; TargetEventData(const TargetEventData &) = delete; @@ -622,6 +639,30 @@ class Target : public std::enable_shared_from_this, /// requirements. 
llvm::Error SetLabel(llvm::StringRef label); + /// Get the target session name for this target. + /// + /// Provides a meaningful name for IDEs or tools to display for dynamically + /// created targets. Defaults to "Session {ID}" based on the globally unique + /// ID. + /// + /// \return + /// The target session name for this target. + llvm::StringRef GetTargetSessionName() { return m_target_session_name; } + + /// Set the target session name for this target. + /// + /// This should typically be set along with the event + /// eBroadcastBitNewTargetCreated. Useful for scripts or triggers that + /// automatically create targets and want to provide meaningful names that + /// IDEs or other tools can display to help users identify the origin and + /// purpose of each target. + /// + /// \param[in] target_session_name + /// The target session name to set for this target. + void SetTargetSessionName(llvm::StringRef target_session_name) { + m_target_session_name = target_session_name.str(); + } + /// Find a binary on the system and return its Module, /// or return an existing Module that is already in the Target. /// @@ -1719,8 +1760,11 @@ class Target : public std::enable_shared_from_this, bool m_is_dummy_target; unsigned m_next_persistent_variable_index = 0; lldb::user_id_t m_target_unique_id = - LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; /// The globally unique ID + LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; ///< The globally unique ID /// assigned to this target + std::string m_target_session_name; ///< The target session name for this + /// target, used to name debugging + /// sessions in DAP. /// An optional \a lldb_private::Trace object containing processor trace /// information of this target. 
lldb::TraceSP m_trace_sp; diff --git a/lldb/packages/Python/lldbsuite/test/builders/darwin.py b/lldb/packages/Python/lldbsuite/test/builders/darwin.py index a023bda3ad801..eebe0ef47fd85 100644 --- a/lldb/packages/Python/lldbsuite/test/builders/darwin.py +++ b/lldb/packages/Python/lldbsuite/test/builders/darwin.py @@ -60,7 +60,7 @@ def get_triple_str(arch, vendor, os, version, env): component = [arch, vendor, os + version] if env: - components.append(env) + component.append(env) return "-".join(component) diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py index 63f7df4de1894..f280bd2b3887b 100644 --- a/lldb/packages/Python/lldbsuite/test/dotest.py +++ b/lldb/packages/Python/lldbsuite/test/dotest.py @@ -1108,11 +1108,7 @@ def run_suite(): checkDAPSupport() skipped_categories_list = ", ".join(configuration.skip_categories) - print( - "Skipping the following test categories: {}".format( - configuration.skip_categories - ) - ) + print(f"Skipping the following test categories: {skipped_categories_list}") for testdir in configuration.testdirs: for dirpath, dirnames, filenames in os.walk(testdir): diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py index 459b7ab89dbef..4a7ba78b63993 100644 --- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py +++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py @@ -785,6 +785,8 @@ def request_attach( *, program: Optional[str] = None, pid: Optional[int] = None, + debuggerId: Optional[int] = None, + targetId: Optional[int] = None, waitFor=False, initCommands: Optional[list[str]] = None, preRunCommands: Optional[list[str]] = None, @@ -804,6 +806,10 @@ def request_attach( args_dict["pid"] = pid if program is not None: args_dict["program"] = program + if debuggerId is not None: + args_dict["debuggerId"] = debuggerId + if targetId is not None: + 
args_dict["targetId"] = targetId if waitFor: args_dict["waitFor"] = waitFor args_dict["initCommands"] = self.init_commands @@ -1259,16 +1265,18 @@ def request_setFunctionBreakpoints(self, names, condition=None, hitCondition=Non return response def request_dataBreakpointInfo( - self, variablesReference, name, frameIndex=0, threadId=None + self, variablesReference, name, size=None, frameIndex=0, threadId=None ): stackFrame = self.get_stackFrame(frameIndex=frameIndex, threadId=threadId) if stackFrame is None: return [] - args_dict = { - "variablesReference": variablesReference, - "name": name, - "frameId": stackFrame["id"], - } + args_dict = {"name": name} + if size is None: + args_dict["variablesReference"] = variablesReference + args_dict["frameId"] = stackFrame["id"] + else: + args_dict["asAddress"] = True + args_dict["bytes"] = size command_dict = { "command": "dataBreakpointInfo", "type": "request", diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py index 71ca60ebe8d34..c7d302cc2dea2 100644 --- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py +++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py @@ -169,6 +169,7 @@ def verify_breakpoint_hit(self, breakpoint_ids: List[Union[int, str]]): if ( body["reason"] != "breakpoint" and body["reason"] != "instruction breakpoint" + and body["reason"] != "data breakpoint" ): continue if "hitBreakpointIds" not in body: diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp index 98d10aa07c53f..578a7bdf7433d 100644 --- a/lldb/source/API/SBTarget.cpp +++ b/lldb/source/API/SBTarget.cpp @@ -128,6 +128,12 @@ SBTarget SBTarget::GetTargetFromEvent(const SBEvent &event) { return Target::TargetEventData::GetTargetFromEvent(event.get()); } +SBTarget SBTarget::GetCreatedTargetFromEvent(const SBEvent &event) { + LLDB_INSTRUMENT_VA(event); + + return 
Target::TargetEventData::GetCreatedTargetFromEvent(event.get()); +} + uint32_t SBTarget::GetNumModulesFromEvent(const SBEvent &event) { LLDB_INSTRUMENT_VA(event); @@ -1614,6 +1620,19 @@ const char *SBTarget::GetTriple() { return nullptr; } +const char *SBTarget::GetArchName() { + LLDB_INSTRUMENT_VA(this); + + if (TargetSP target_sp = GetSP()) { + llvm::StringRef arch_name = + target_sp->GetArchitecture().GetTriple().getArchName(); + ConstString const_arch_name(arch_name); + + return const_arch_name.GetCString(); + } + return nullptr; +} + const char *SBTarget::GetABIName() { LLDB_INSTRUMENT_VA(this); @@ -1641,6 +1660,14 @@ lldb::user_id_t SBTarget::GetGloballyUniqueID() const { return LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; } +const char *SBTarget::GetTargetSessionName() const { + LLDB_INSTRUMENT_VA(this); + + if (TargetSP target_sp = GetSP()) + return ConstString(target_sp->GetTargetSessionName()).AsCString(); + return nullptr; +} + SBError SBTarget::SetLabel(const char *label) { LLDB_INSTRUMENT_VA(this, label); diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp index 30bca639060e6..7f880d223d6c3 100644 --- a/lldb/source/Commands/CommandObjectTarget.cpp +++ b/lldb/source/Commands/CommandObjectTarget.cpp @@ -60,6 +60,7 @@ #include "lldb/lldb-forward.h" #include "lldb/lldb-private-enumerations.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/lldb/source/Interpreter/ScriptInterpreter.cpp b/lldb/source/Interpreter/ScriptInterpreter.cpp index 211868b51facb..d2fd372bfe9e3 100644 --- a/lldb/source/Interpreter/ScriptInterpreter.cpp +++ b/lldb/source/Interpreter/ScriptInterpreter.cpp @@ -136,7 +136,7 @@ SymbolContext ScriptInterpreter::GetOpaqueTypeFromSBSymbolContext( return {}; } -std::optional +std::optional ScriptInterpreter::GetOpaqueTypeFromSBMemoryRegionInfo( 
const lldb::SBMemoryRegionInfo &mem_region) const { if (!mem_region.m_opaque_up) diff --git a/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt b/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt index 01d588ff6a78b..759a7c4dd14fb 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt +++ b/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt @@ -51,10 +51,10 @@ add_lldb_library(lldbPluginExpressionParserClang CLANG_LIBS clangAST clangCodeGen - clangDriver clangEdit clangFrontend clangLex + clangOptions clangParse clangRewrite clangRewriteFrontend diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp index 6de851081598f..660a21e3c6a8d 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp @@ -10,7 +10,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" @@ -53,7 +53,7 @@ static bool DefaultComputeClangResourceDirectory(FileSpec &lldb_shlib_spec, std::string raw_path = lldb_shlib_spec.GetPath(); llvm::StringRef parent_dir = llvm::sys::path::parent_path(raw_path); static const std::string clang_resource_path = - clang::driver::Driver::GetResourcesPath("bin/lldb"); + clang::GetResourcesPath("bin/lldb"); static const llvm::StringRef kResourceDirSuffixes[] = { // LLVM.org's build of LLDB uses the clang resource directory placed diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp index e37c84efefdc9..ce8dc50b84a31 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp @@ -10,6 +10,7 @@ #include "clang/Basic/DiagnosticFrontend.h" 
#include "clang/Basic/IdentifierTable.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendActions.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp index 4b66ff814935a..a3624accf9b5a 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp @@ -208,6 +208,20 @@ static bool IsTrivialBasename(const llvm::StringRef &basename) { return idx == basename.size(); } +/// A context is trivial if and only if it matches this pattern: +/// "^\s*([A-Za-z0-9_:]*)\s*$". For example, function `foo::bar::func()` +/// has a trivial context, but `foo<int>::bar::func()` doesn't. +static bool IsTrivialContext(llvm::StringRef context) { + // remove trailing or leading whitespace. + context = context.trim(); + + const auto iter = context.find_if_not([](char current) { + return std::isalnum(static_cast(current)) || + current == '_' || current == ':'; + }); + return iter == llvm::StringRef::npos; +} + /// Writes out the function name in 'full_name' to 'out_stream' /// but replaces each argument type with the variable name /// and the corresponding pretty-printed value @@ -481,18 +495,17 @@ bool CPlusPlusLanguage::CxxMethodName::TrySimplifiedParse() { m_basename = full.substr(basename_begin, basename_end - basename_begin); } - if (IsTrivialBasename(m_basename)) { + if (IsTrivialBasename(m_basename) && IsTrivialContext(m_context)) { return true; - } else { - // The C++ basename doesn't match our regular expressions so this can't - // be a valid C++ method, clear everything out and indicate an error - m_context = llvm::StringRef(); - m_basename = llvm::StringRef(); - m_arguments = llvm::StringRef(); - m_qualifiers = llvm::StringRef(); - m_return_type = 
llvm::StringRef(); - return false; } + // The C++ basename doesn't match our regular expressions so this can't + // be a valid C++ method, clear everything out and indicate an error + m_context = llvm::StringRef(); + m_basename = llvm::StringRef(); + m_arguments = llvm::StringRef(); + m_qualifiers = llvm::StringRef(); + m_return_type = llvm::StringRef(); + return false; } return false; } diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp index aaec1600dacff..40e783f9bad38 100644 --- a/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp +++ b/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp @@ -1126,7 +1126,8 @@ lldb::LanguageType SymbolFileNativePDB::ParseLanguage(CompileUnit &comp_unit) { } void SymbolFileNativePDB::AddSymbols(Symtab &symtab) { - auto *section_list = m_objfile_sp->GetSectionList(); + auto *section_list = + m_objfile_sp->GetModule()->GetObjectFile()->GetSectionList(); if (!section_list) return; diff --git a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp index 0ccb1804bb13a..97c995fc9b22a 100644 --- a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp +++ b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp @@ -287,8 +287,10 @@ uint32_t SymbolFilePDB::CalculateAbilities() { } void SymbolFilePDB::InitializeObject() { - lldb::addr_t obj_load_address = - m_objfile_sp->GetBaseAddress().GetFileAddress(); + lldb::addr_t obj_load_address = m_objfile_sp->GetModule() + ->GetObjectFile() + ->GetBaseAddress() + .GetFileAddress(); lldbassert(obj_load_address && obj_load_address != LLDB_INVALID_ADDRESS); m_session_up->setLoadAddress(obj_load_address); if (!m_global_scope_up) @@ -1479,7 +1481,8 @@ void SymbolFilePDB::AddSymbols(lldb_private::Symtab &symtab) { if (!results) return; - auto section_list = m_objfile_sp->GetSectionList(); + auto section_list = + 
m_objfile_sp->GetModule()->GetObjectFile()->GetSectionList(); if (!section_list) return; diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp index 69edea503002e..9c8e8fa7041ee 100644 --- a/lldb/source/Target/Process.cpp +++ b/lldb/source/Target/Process.cpp @@ -2452,8 +2452,10 @@ size_t Process::ReadScalarIntegerFromMemory(addr_t addr, uint32_t byte_size, scalar = data.GetMaxU32(&offset, byte_size); else scalar = data.GetMaxU64(&offset, byte_size); - if (is_signed) + if (is_signed) { + scalar.MakeSigned(); scalar.SignExtend(byte_size * 8); + } return bytes_read; } } else { @@ -6545,7 +6547,7 @@ Status Process::WriteMemoryTags(lldb::addr_t addr, size_t len, // Create a CoreFileMemoryRange from a MemoryRegionInfo static CoreFileMemoryRange -CreateCoreFileMemoryRange(const MemoryRegionInfo ®ion) { +CreateCoreFileMemoryRange(const lldb_private::MemoryRegionInfo ®ion) { const addr_t addr = region.GetRange().GetRangeBase(); llvm::AddressRange range(addr, addr + region.GetRange().GetByteSize()); return {range, region.GetLLDBPermissions()}; @@ -6554,7 +6556,7 @@ CreateCoreFileMemoryRange(const MemoryRegionInfo ®ion) { // Add dirty pages to the core file ranges and return true if dirty pages // were added. Return false if the dirty page information is not valid or in // the region. -static bool AddDirtyPages(const MemoryRegionInfo ®ion, +static bool AddDirtyPages(const lldb_private::MemoryRegionInfo ®ion, CoreFileMemoryRanges &ranges) { const auto &dirty_page_list = region.GetDirtyPageList(); if (!dirty_page_list) @@ -6593,8 +6595,8 @@ static bool AddDirtyPages(const MemoryRegionInfo ®ion, // given region. If the region has dirty page information, only dirty pages // will be added to \a ranges, else the entire range will be added to \a // ranges. 
-static void AddRegion(const MemoryRegionInfo ®ion, bool try_dirty_pages, - CoreFileMemoryRanges &ranges) { +static void AddRegion(const lldb_private::MemoryRegionInfo ®ion, + bool try_dirty_pages, CoreFileMemoryRanges &ranges) { // Don't add empty ranges. if (region.GetRange().GetByteSize() == 0) return; @@ -6617,7 +6619,7 @@ static void SaveDynamicLoaderSections(Process &process, if (!dyld) return; - std::vector dynamic_loader_mem_regions; + std::vector dynamic_loader_mem_regions; std::function save_thread_predicate = [&](const lldb_private::Thread &t) -> bool { return options.ShouldThreadBeSaved(t.GetID()); @@ -6742,10 +6744,11 @@ static void GetCoreFileSaveRangesStackOnly(Process &process, // TODO: We should refactor CoreFileMemoryRanges to use the lldb range type, and // then add an intersect method on it, or MemoryRegionInfo. -static MemoryRegionInfo Intersect(const MemoryRegionInfo &lhs, - const MemoryRegionInfo::RangeType &rhs) { +static lldb_private::MemoryRegionInfo +Intersect(const lldb_private::MemoryRegionInfo &lhs, + const lldb_private::MemoryRegionInfo::RangeType &rhs) { - MemoryRegionInfo region_info; + lldb_private::MemoryRegionInfo region_info; region_info.SetLLDBPermissions(lhs.GetLLDBPermissions()); region_info.GetRange() = lhs.GetRange().Intersect(rhs); diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp index 5f2e7af54044a..3a936b85f6339 100644 --- a/lldb/source/Target/Target.cpp +++ b/lldb/source/Target/Target.cpp @@ -185,6 +185,8 @@ Target::Target(Debugger &debugger, const ArchSpec &target_arch, m_internal_stop_hooks(), m_latest_stop_hook_id(0), m_valid(true), m_suppress_stop_hooks(false), m_is_dummy_target(is_dummy_target), m_target_unique_id(g_target_unique_id++), + m_target_session_name( + llvm::formatv("Session {0}", m_target_unique_id).str()), m_frame_recognizer_manager_up( std::make_unique()) { SetEventName(eBroadcastBitBreakpointChanged, "breakpoint-changed"); @@ -192,6 +194,7 @@ Target::Target(Debugger 
&debugger, const ArchSpec &target_arch, SetEventName(eBroadcastBitModulesUnloaded, "modules-unloaded"); SetEventName(eBroadcastBitWatchpointChanged, "watchpoint-changed"); SetEventName(eBroadcastBitSymbolsLoaded, "symbols-loaded"); + SetEventName(eBroadcastBitNewTargetCreated, "new-target-created"); CheckInWithManager(); @@ -2280,8 +2283,10 @@ size_t Target::ReadScalarIntegerFromMemory(const Address &addr, uint32_t byte_si else scalar = data.GetMaxU64(&offset, byte_size); - if (is_signed) + if (is_signed) { + scalar.MakeSigned(); scalar.SignExtend(byte_size * 8); + } return bytes_read; } } else { @@ -2296,7 +2301,7 @@ int64_t Target::ReadSignedIntegerFromMemory(const Address &addr, int64_t fail_value, Status &error, bool force_live_memory) { Scalar scalar; - if (ReadScalarIntegerFromMemory(addr, integer_byte_size, false, scalar, error, + if (ReadScalarIntegerFromMemory(addr, integer_byte_size, true, scalar, error, force_live_memory)) return scalar.SLongLong(fail_value); return fail_value; @@ -5198,6 +5203,11 @@ Target::TargetEventData::TargetEventData(const lldb::TargetSP &target_sp, const ModuleList &module_list) : EventData(), m_target_sp(target_sp), m_module_list(module_list) {} +Target::TargetEventData::TargetEventData( + const lldb::TargetSP &target_sp, const lldb::TargetSP &created_target_sp) + : EventData(), m_target_sp(target_sp), + m_created_target_sp(created_target_sp), m_module_list() {} + Target::TargetEventData::~TargetEventData() = default; llvm::StringRef Target::TargetEventData::GetFlavorString() { @@ -5232,6 +5242,15 @@ TargetSP Target::TargetEventData::GetTargetFromEvent(const Event *event_ptr) { return target_sp; } +TargetSP +Target::TargetEventData::GetCreatedTargetFromEvent(const Event *event_ptr) { + TargetSP created_target_sp; + const TargetEventData *event_data = GetEventDataFromEvent(event_ptr); + if (event_data) + created_target_sp = event_data->m_created_target_sp; + return created_target_sp; +} + ModuleList 
Target::TargetEventData::GetModuleListFromEvent(const Event *event_ptr) { ModuleList module_list; diff --git a/lldb/source/Utility/RegisterValue.cpp b/lldb/source/Utility/RegisterValue.cpp index c28c9e2d4d106..4d762dc80e7f5 100644 --- a/lldb/source/Utility/RegisterValue.cpp +++ b/lldb/source/Utility/RegisterValue.cpp @@ -196,9 +196,7 @@ Status RegisterValue::SetValueFromData(const RegisterInfo ®_info, SetUInt64(src.GetMaxU64(&src_offset, src_len)); else { std::vector native_endian_src(src_len, 0); - src.ExtractBytes(src_offset, src_len, - llvm::sys::IsLittleEndianHost ? eByteOrderLittle - : eByteOrderBig, + src.ExtractBytes(src_offset, src_len, endian::InlHostByteOrder(), native_endian_src.data()); llvm::APInt uint = llvm::APInt::getZero(src_len * 8); llvm::LoadIntFromMemory(uint, native_endian_src.data(), src_len); diff --git a/lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILPointerArithmetic.py b/lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILExprPointerArithmetic.py similarity index 93% rename from lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILPointerArithmetic.py rename to lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILExprPointerArithmetic.py index 88429b370710e..448cd5b1ec7e0 100644 --- a/lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILPointerArithmetic.py +++ b/lldb/test/API/commands/frame/var-dil/expr/PointerArithmetic/TestFrameVarDILExprPointerArithmetic.py @@ -8,7 +8,7 @@ from lldbsuite.test import lldbutil -class TestFrameVarDILPointerArithmetic(TestBase): +class TestFrameVarDILExprPointerArithmetic(TestBase): NO_DEBUG_INFO_TESTCASE = True def test_pointer_arithmetic(self): diff --git a/lldb/test/API/python_api/target/TestTargetAPI.py b/lldb/test/API/python_api/target/TestTargetAPI.py index d346563af18e2..d3c64d87375b4 100644 --- a/lldb/test/API/python_api/target/TestTargetAPI.py +++ 
b/lldb/test/API/python_api/target/TestTargetAPI.py @@ -105,6 +105,24 @@ def test_resolve_file_address(self): self.assertIsNotNone(data_section2) self.assertEqual(data_section.name, data_section2.name) + def test_get_arch_name(self): + d = {"EXE": "b.out"} + self.build(dictionary=d) + self.setTearDownCleanup(dictionary=d) + target = self.create_simple_target("b.out") + + arch_name = target.arch_name + self.assertTrue(len(arch_name) > 0, "Got an arch name") + + # Test consistency with triple. + triple = target.triple + self.assertTrue(len(triple) > 0, "Got a triple") + self.assertEqual( + triple.split("-")[0], + arch_name, + "Arch name is equal to the first item of the triple", + ) + def test_get_ABIName(self): d = {"EXE": "b.out"} self.build(dictionary=d) diff --git a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py index 2db00a5ac3b6f..d6287397a93b0 100644 --- a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py +++ b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py @@ -75,3 +75,38 @@ def test_by_name_waitFor(self): self.spawn_thread.start() self.attach(program=program, waitFor=True) self.continue_and_verify_pid() + + def test_attach_with_missing_debuggerId_or_targetId(self): + """ + Test that attaching with only one of debuggerId/targetId specified + fails with the expected error message. + """ + self.build_and_create_debug_adapter() + + # Test with only targetId specified (no debuggerId) + resp = self.attach(targetId=99999, expectFailure=True) + self.assertFalse(resp["success"]) + self.assertIn( + "Both debuggerId and targetId must be specified together", + resp["body"]["error"]["format"], + ) + + def test_attach_with_invalid_debuggerId_and_targetId(self): + """ + Test that attaching with both debuggerId and targetId specified but + invalid fails with an appropriate error message. 
+ """ + self.build_and_create_debug_adapter() + + # Attach with both debuggerId=9999 and targetId=99999 (both invalid). + # Since debugger ID 9999 likely doesn't exist in the global registry, + # we expect a validation error. + resp = self.attach(debuggerId=9999, targetId=99999, expectFailure=True) + self.assertFalse(resp["success"]) + error_msg = resp["body"]["error"]["format"] + # Either error is acceptable - both indicate the debugger reuse + # validation is working correctly + self.assertTrue( + "Unable to find existing debugger" in error_msg + or f"Expected debugger/target not found error, got: {error_msg}" + ) diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py index a542a318050dd..df029ca16d667 100644 --- a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py +++ b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py @@ -39,18 +39,21 @@ def test_duplicate_start_addresses(self): {"dataId": response_x["body"]["dataId"], "accessType": "write"}, ] set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) - self.assertEqual( - set_response["body"]["breakpoints"], - [{"verified": False}, {"verified": True}, {"verified": True}], - ) + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 3) + self.assertFalse(breakpoints[0]["verified"]) + self.assertTrue(breakpoints[1]["verified"]) + self.assertTrue(breakpoints[2]["verified"]) - self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[2]["id"]]) x_val = self.dap_server.get_local_variable_value("x") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(x_val, "2") self.assertEqual(i_val, "1") - self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[1]["id"]]) arr_2 = 
self.dap_server.get_local_variable_child("arr", "[2]") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(arr_2["value"], "42") @@ -79,18 +82,20 @@ def test_expression(self): {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"}, ] set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) - self.assertEqual( - set_response["body"]["breakpoints"], - [{"verified": True}, {"verified": True}], - ) + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 2) + self.assertTrue(breakpoints[0]["verified"]) + self.assertTrue(breakpoints[1]["verified"]) - self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[0]["id"]]) x_val = self.dap_server.get_local_variable_value("x") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(x_val, "2") self.assertEqual(i_val, "1") - self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[1]["id"]]) arr_2 = self.dap_server.get_local_variable_child("arr", "[2]") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(arr_2["value"], "42") @@ -123,18 +128,20 @@ def test_functionality(self): {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"}, ] set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) - self.assertEqual( - set_response["body"]["breakpoints"], - [{"verified": True}, {"verified": True}], - ) + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 2) + self.assertTrue(breakpoints[0]["verified"]) + self.assertTrue(breakpoints[1]["verified"]) - self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[0]["id"]]) x_val = self.dap_server.get_local_variable_value("x") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(x_val, "2") self.assertEqual(i_val, "1") - 
self.continue_to_next_stop() + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[1]["id"]]) arr_2 = self.dap_server.get_local_variable_child("arr", "[2]") i_val = self.dap_server.get_local_variable_value("i") self.assertEqual(arr_2["value"], "42") @@ -153,8 +160,11 @@ def test_functionality(self): } ] set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) - self.assertEqual(set_response["body"]["breakpoints"], [{"verified": True}]) - self.continue_to_next_stop() + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 1) + self.assertTrue(breakpoints[0]["verified"]) + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[0]["id"]]) x_val = self.dap_server.get_local_variable_value("x") self.assertEqual(x_val, "3") @@ -167,7 +177,64 @@ def test_functionality(self): } ] set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) - self.assertEqual(set_response["body"]["breakpoints"], [{"verified": True}]) - self.continue_to_next_stop() + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 1) + self.assertTrue(breakpoints[0]["verified"]) + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[0]["id"]]) x_val = self.dap_server.get_local_variable_value("x") self.assertEqual(x_val, "10") + + @skipIfWindows + def test_bytes(self): + """Tests setting data breakpoints on memory range.""" + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + source = "main.cpp" + first_loop_break_line = line_number(source, "// first loop breakpoint") + self.set_source_breakpoints(source, [first_loop_break_line]) + self.continue_to_next_stop() + # Test write watchpoints on x, arr[2] + x = self.dap_server.get_local_variable("x") + response_x = self.dap_server.request_dataBreakpointInfo( + 0, x["memoryReference"], 4 + ) + arr_2 = self.dap_server.get_local_variable_child("arr", "[2]") + response_arr_2 
= self.dap_server.request_dataBreakpointInfo( + 0, arr_2["memoryReference"], 4 + ) + + # Test response from dataBreakpointInfo request. + self.assertEqual( + response_x["body"]["dataId"].split("/"), [x["memoryReference"][2:], "4"] + ) + self.assertEqual(response_x["body"]["accessTypes"], self.accessTypes) + self.assertEqual( + response_arr_2["body"]["dataId"].split("/"), + [arr_2["memoryReference"][2:], "4"], + ) + self.assertEqual(response_arr_2["body"]["accessTypes"], self.accessTypes) + dataBreakpoints = [ + {"dataId": response_x["body"]["dataId"], "accessType": "write"}, + {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"}, + ] + set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints) + breakpoints = set_response["body"]["breakpoints"] + self.assertEqual(len(breakpoints), 2) + self.assertTrue(breakpoints[0]["verified"]) + self.assertTrue(breakpoints[1]["verified"]) + + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[0]["id"]]) + x_val = self.dap_server.get_local_variable_value("x") + i_val = self.dap_server.get_local_variable_value("i") + self.assertEqual(x_val, "2") + self.assertEqual(i_val, "1") + + self.dap_server.request_continue() + self.verify_breakpoint_hit([breakpoints[1]["id"]]) + arr_2 = self.dap_server.get_local_variable_child("arr", "[2]") + i_val = self.dap_server.get_local_variable_value("i") + self.assertEqual(arr_2["value"], "42") + self.assertEqual(i_val, "2") + self.dap_server.request_setDataBreakpoint([]) diff --git a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py index b487257b6414d..7e60dd22f1084 100644 --- a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py +++ b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py @@ -36,3 +36,54 @@ def test_startDebugging(self): request = self.dap_server.reverse_requests[0] 
self.assertEqual(request["arguments"]["configuration"]["pid"], 321) self.assertEqual(request["arguments"]["request"], "attach") + + def test_startDebugging_debugger_reuse(self): + """ + Tests that debugger and target IDs can be passed through startDebugging + for debugger reuse. This verifies the infrastructure for child DAP + sessions to reuse the parent's debugger and attach to an existing target. + """ + program = self.getBuildArtifact("a.out") + source = "main.c" + self.build_and_launch(program) + + breakpoint_line = line_number(source, "// breakpoint") + self.set_source_breakpoints(source, [breakpoint_line]) + self.continue_to_next_stop() + + # Use mock IDs to test the infrastructure + # In a real scenario, these would come from the parent session + test_debugger_id = 1 + test_target_id = 100 + + # Send a startDebugging request with debuggerId and targetId + # This simulates creating a child DAP session that reuses the debugger + self.dap_server.request_evaluate( + f'`lldb-dap start-debugging attach \'{{"debuggerId":{test_debugger_id},"targetId":{test_target_id}}}\'', + context="repl", + ) + + self.continue_to_exit() + + # Verify the reverse request was sent with the correct IDs + self.assertEqual( + len(self.dap_server.reverse_requests), + 1, + "Should have received one startDebugging reverse request", + ) + + request = self.dap_server.reverse_requests[0] + self.assertEqual(request["command"], "startDebugging") + self.assertEqual(request["arguments"]["request"], "attach") + + config = request["arguments"]["configuration"] + self.assertEqual( + config["debuggerId"], + test_debugger_id, + "Reverse request should include debugger ID", + ) + self.assertEqual( + config["targetId"], + test_target_id, + "Reverse request should include target ID", + ) diff --git a/lldb/test/Shell/SymbolFile/PDB/add-symbols.cpp b/lldb/test/Shell/SymbolFile/PDB/add-symbols.cpp new file mode 100644 index 0000000000000..64fbc84a98f24 --- /dev/null +++ 
b/lldb/test/Shell/SymbolFile/PDB/add-symbols.cpp @@ -0,0 +1,39 @@ +// REQUIRES: lld, target-windows + +// Test that `target symbols add ` works. +// RUN: %build --compiler=clang-cl --nodefaultlib --output=%t.exe %s +// RUN: mv %t.pdb %t-renamed.pdb + +// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb \ +// RUN: -o "b main" \ +// RUN: -o "target symbols add %t-renamed.pdb" \ +// RUN: -o r \ +// RUN: -o "target variable a" \ +// RUN: -o "target modules dump symtab" \ +// RUN: -b %t.exe | FileCheck %s + +// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb \ +// RUN: -o "b main" \ +// RUN: -o "target symbols add %t-renamed.pdb" \ +// RUN: -o r \ +// RUN: -o "target variable a" \ +// RUN: -o "target modules dump symtab" \ +// RUN: -b %t.exe | FileCheck %s + +// CHECK: target create +// CHECK: (lldb) b main +// CHECK-NEXT: Breakpoint 1: no locations (pending). +// CHECK: (lldb) target symbols add +// CHECK: 1 location added to breakpoint 1 + +// CHECK: * thread #1, stop reason = breakpoint 1.1 +// CHECK: (lldb) target variable a +// CHECK-NEXT: (A) a = (x = 47) +// CHECK: (lldb) target modules dump symtab +// CHECK: [{{.*}} main + +struct A { + int x = 47; +}; +A a; +int main() {} diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt index fa940b7b73943..237c3043dbbc7 100644 --- a/lldb/tools/lldb-dap/CMakeLists.txt +++ b/lldb/tools/lldb-dap/CMakeLists.txt @@ -10,6 +10,7 @@ add_lldb_library(lldbDAP DAP.cpp DAPError.cpp DAPLog.cpp + DAPSessionManager.cpp EventHelper.cpp ExceptionBreakpoint.cpp FifoFiles.cpp diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp index d4203a2f00983..465d85a07bd34 100644 --- a/lldb/tools/lldb-dap/DAP.cpp +++ b/lldb/tools/lldb-dap/DAP.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "DAP.h" +#include "CommandPlugins.h" #include "DAPLog.h" #include "EventHelper.h" #include "ExceptionBreakpoint.h" @@ -242,10 +243,12 @@ llvm::Error 
DAP::ConfigureIO(std::FILE *overrideOut, std::FILE *overrideErr) { } void DAP::StopEventHandlers() { - if (event_thread.joinable()) { - broadcaster.BroadcastEventByType(eBroadcastBitStopEventThread); - event_thread.join(); - } + event_thread_sp.reset(); + + // Clean up expired event threads from the session manager. + DAPSessionManager::GetInstance().ReleaseExpiredEventThreads(); + + // Still handle the progress thread normally since it's per-DAP instance. if (progress_event_thread.joinable()) { broadcaster.BroadcastEventByType(eBroadcastBitStopProgressThread); progress_event_thread.join(); @@ -816,7 +819,8 @@ void DAP::SetTarget(const lldb::SBTarget target) { lldb::SBTarget::eBroadcastBitModulesLoaded | lldb::SBTarget::eBroadcastBitModulesUnloaded | lldb::SBTarget::eBroadcastBitSymbolsLoaded | - lldb::SBTarget::eBroadcastBitSymbolsChanged); + lldb::SBTarget::eBroadcastBitSymbolsChanged | + lldb::SBTarget::eBroadcastBitNewTargetCreated); listener.StartListeningForEvents(this->broadcaster, eBroadcastBitStopEventThread); } @@ -1303,13 +1307,99 @@ protocol::Capabilities DAP::GetCustomCapabilities() { } void DAP::StartEventThread() { - event_thread = std::thread(&DAP::EventThread, this); + // Get event thread for this debugger (creates it if it doesn't exist). 
+ event_thread_sp = DAPSessionManager::GetInstance().GetEventThreadForDebugger( + debugger, this); } void DAP::StartProgressEventThread() { progress_event_thread = std::thread(&DAP::ProgressEventThread, this); } +void DAP::StartEventThreads() { + if (clientFeatures.contains(eClientFeatureProgressReporting)) + StartProgressEventThread(); + + StartEventThread(); +} + +llvm::Error DAP::InitializeDebugger(int debugger_id, + lldb::user_id_t target_id) { + // Find the existing debugger by ID + debugger = lldb::SBDebugger::FindDebuggerWithID(debugger_id); + if (!debugger.IsValid()) { + return llvm::createStringError( + "Unable to find existing debugger for debugger ID"); + } + + // Find the target within the debugger by its globally unique ID + lldb::SBTarget target = debugger.FindTargetByGloballyUniqueID(target_id); + if (!target.IsValid()) { + return llvm::createStringError( + "Unable to find existing target for target ID"); + } + + // Set the target for this DAP session. + SetTarget(target); + StartEventThreads(); + return llvm::Error::success(); +} + +llvm::Error DAP::InitializeDebugger() { + debugger = lldb::SBDebugger::Create(/*argument_name=*/false); + + // Configure input/output/error file descriptors. + debugger.SetInputFile(in); + target = debugger.GetDummyTarget(); + + llvm::Expected out_fd = out.GetWriteFileDescriptor(); + if (!out_fd) + return out_fd.takeError(); + debugger.SetOutputFile(lldb::SBFile(*out_fd, "w", false)); + + llvm::Expected err_fd = err.GetWriteFileDescriptor(); + if (!err_fd) + return err_fd.takeError(); + debugger.SetErrorFile(lldb::SBFile(*err_fd, "w", false)); + + // The sourceInitFile option is not part of the DAP specification. It is an + // extension used by the test suite to prevent sourcing `.lldbinit` and + // changing its behavior. The CLI flag --no-lldbinit takes precedence over + // the DAP parameter. 
+ bool should_source_init_files = !no_lldbinit && sourceInitFile; + if (should_source_init_files) { + debugger.SkipLLDBInitFiles(false); + debugger.SkipAppInitFiles(false); + lldb::SBCommandReturnObject init; + auto interp = debugger.GetCommandInterpreter(); + interp.SourceInitFileInGlobalDirectory(init); + interp.SourceInitFileInHomeDirectory(init); + } + + // Run initialization commands. + if (llvm::Error err = RunPreInitCommands()) + return err; + + auto cmd = debugger.GetCommandInterpreter().AddMultiwordCommand( + "lldb-dap", "Commands for managing lldb-dap."); + + if (clientFeatures.contains(eClientFeatureStartDebuggingRequest)) { + cmd.AddCommand( + "start-debugging", new StartDebuggingCommand(*this), + "Sends a startDebugging request from the debug adapter to the client " + "to start a child debug session of the same type as the caller."); + } + + cmd.AddCommand( + "repl-mode", new ReplModeCommand(*this), + "Get or set the repl behavior of lldb-dap evaluation requests."); + cmd.AddCommand("send-event", new SendEventCommand(*this), + "Sends an DAP event to the client."); + + StartEventThreads(); + return llvm::Error::success(); +} + void DAP::ProgressEventThread() { lldb::SBListener listener("lldb-dap.progress.listener"); debugger.GetBroadcaster().AddListener( @@ -1370,213 +1460,6 @@ void DAP::ProgressEventThread() { } } -// All events from the debugger, target, process, thread and frames are -// received in this function that runs in its own thread. We are using a -// "FILE *" to output packets back to VS Code and they have mutexes in them -// them prevent multiple threads from writing simultaneously so no locking -// is required. -void DAP::EventThread() { - llvm::set_thread_name("lldb.DAP.client." 
+ m_client_name + ".event_handler"); - lldb::SBListener listener = debugger.GetListener(); - broadcaster.AddListener(listener, eBroadcastBitStopEventThread); - debugger.GetBroadcaster().AddListener( - listener, lldb::eBroadcastBitError | lldb::eBroadcastBitWarning); - - // listen for thread events. - listener.StartListeningForEventClass( - debugger, lldb::SBThread::GetBroadcasterClassName(), - lldb::SBThread::eBroadcastBitStackChanged); - - lldb::SBEvent event; - bool done = false; - while (!done) { - if (!listener.WaitForEvent(UINT32_MAX, event)) - continue; - - const uint32_t event_mask = event.GetType(); - if (lldb::SBProcess::EventIsProcessEvent(event)) { - HandleProcessEvent(event, /*&process_exited=*/done); - } else if (lldb::SBTarget::EventIsTargetEvent(event)) { - HandleTargetEvent(event); - } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { - HandleBreakpointEvent(event); - } else if (lldb::SBThread::EventIsThreadEvent(event)) { - HandleThreadEvent(event); - } else if (event_mask & lldb::eBroadcastBitError || - event_mask & lldb::eBroadcastBitWarning) { - HandleDiagnosticEvent(event); - } else if (event.BroadcasterMatchesRef(broadcaster)) { - if (event_mask & eBroadcastBitStopEventThread) { - done = true; - } - } - } -} - -void DAP::HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited) { - lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); - const uint32_t event_mask = event.GetType(); - if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { - auto state = lldb::SBProcess::GetStateFromEvent(event); - switch (state) { - case lldb::eStateConnected: - case lldb::eStateDetached: - case lldb::eStateInvalid: - case lldb::eStateUnloaded: - break; - case lldb::eStateAttaching: - case lldb::eStateCrashed: - case lldb::eStateLaunching: - case lldb::eStateStopped: - case lldb::eStateSuspended: - // Only report a stopped event if the process was not - // automatically restarted. 
- if (!lldb::SBProcess::GetRestartedFromEvent(event)) { - SendStdOutStdErr(*this, process); - if (llvm::Error err = SendThreadStoppedEvent(*this)) - DAP_LOG_ERROR(log, std::move(err), - "({1}) reporting thread stopped: {0}", m_client_name); - } - break; - case lldb::eStateRunning: - case lldb::eStateStepping: - WillContinue(); - SendContinuedEvent(*this); - break; - case lldb::eStateExited: - lldb::SBStream stream; - process.GetStatus(stream); - SendOutput(OutputType::Console, stream.GetData()); - - // When restarting, we can get an "exited" event for the process we - // just killed with the old PID, or even with no PID. In that case - // we don't have to terminate the session. - if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || - process.GetProcessID() == restarting_process_id) { - restarting_process_id = LLDB_INVALID_PROCESS_ID; - } else { - // Run any exit LLDB commands the user specified in the - // launch.json - RunExitCommands(); - SendProcessExitedEvent(*this, process); - SendTerminatedEvent(); - process_exited = true; - } - break; - } - } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || - (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { - SendStdOutStdErr(*this, process); - } -} - -void DAP::HandleTargetEvent(const lldb::SBEvent &event) { - const uint32_t event_mask = event.GetType(); - if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { - const uint32_t num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); - const bool remove_module = - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; - - // NOTE: Both mutexes must be acquired to prevent deadlock when - // handling `modules_request`, which also requires both locks. 
- lldb::SBMutex api_mutex = GetAPIMutex(); - const std::scoped_lock guard(api_mutex, - modules_mutex); - for (uint32_t i = 0; i < num_modules; ++i) { - lldb::SBModule module = - lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); - - std::optional p_module = - CreateModule(target, module, remove_module); - if (!p_module) - continue; - - const llvm::StringRef module_id = p_module->id; - - const bool module_exists = modules.contains(module_id); - if (remove_module && module_exists) { - modules.erase(module_id); - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonRemoved}}); - } else if (module_exists) { - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonChanged}}); - } else if (!remove_module) { - modules.insert(module_id); - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonNew}}); - } - } - } -} - -void DAP::HandleBreakpointEvent(const lldb::SBEvent &event) { - const uint32_t event_mask = event.GetType(); - if (!(event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged)) - return; - - auto event_type = lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); - auto bp = - Breakpoint(*this, lldb::SBBreakpoint::GetBreakpointFromEvent(event)); - // If the breakpoint was set through DAP, it will have the - // BreakpointBase::kDAPBreakpointLabel. Regardless of whether - // locations were added, removed, or resolved, the breakpoint isn't - // going away and the reason is always "changed". - if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || - event_type & lldb::eBreakpointEventTypeLocationsRemoved || - event_type & lldb::eBreakpointEventTypeLocationsResolved) && - bp.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { - // As the DAP client already knows the path of this breakpoint, we - // don't need to send it back as part of the "changed" event. 
This - // avoids sending paths that should be source mapped. Note that - // CreateBreakpoint doesn't apply source mapping and certain - // implementation ignore the source part of this event anyway. - protocol::Breakpoint protocol_bp = bp.ToProtocolBreakpoint(); - - // "source" is not needed here, unless we add adapter data to be - // saved by the client. - if (protocol_bp.source && !protocol_bp.source->adapterData) - protocol_bp.source = std::nullopt; - - llvm::json::Object body; - body.try_emplace("breakpoint", protocol_bp); - body.try_emplace("reason", "changed"); - - llvm::json::Object bp_event = CreateEventObject("breakpoint"); - bp_event.try_emplace("body", std::move(body)); - - SendJSON(llvm::json::Value(std::move(bp_event))); - } -} - -void DAP::HandleThreadEvent(const lldb::SBEvent &event) { - const uint32_t event_type = event.GetType(); - - if (event_type & lldb::SBThread::eBroadcastBitStackChanged) { - const lldb::SBThread evt_thread = lldb::SBThread::GetThreadFromEvent(event); - SendInvalidatedEvent(*this, {InvalidatedEventBody::eAreaStacks}, - evt_thread.GetThreadID()); - } -} - -void DAP::HandleDiagnosticEvent(const lldb::SBEvent &event) { - const lldb::SBStructuredData data = - lldb::SBDebugger::GetDiagnosticFromEvent(event); - if (!data.IsValid()) - return; - - std::string type = GetStringValue(data.GetValueForKey("type")); - std::string message = GetStringValue(data.GetValueForKey("message")); - SendOutput(OutputType::Important, - llvm::formatv("{0}: {1}", type, message).str()); -} - std::vector DAP::SetSourceBreakpoints( const protocol::Source &source, const std::optional> &breakpoints) { diff --git a/lldb/tools/lldb-dap/DAP.h b/lldb/tools/lldb-dap/DAP.h index 5d40341329f34..b5f2a57d9dc5f 100644 --- a/lldb/tools/lldb-dap/DAP.h +++ b/lldb/tools/lldb-dap/DAP.h @@ -10,6 +10,7 @@ #define LLDB_TOOLS_LLDB_DAP_DAP_H #include "DAPForward.h" +#include "DAPSessionManager.h" #include "ExceptionBreakpoint.h" #include "FunctionBreakpoint.h" #include 
"InstructionBreakpoint.h" @@ -47,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -81,6 +83,8 @@ enum class ReplMode { Variable = 0, Command, Auto }; using DAPTransport = lldb_private::transport::JSONTransport; struct DAP final : public DAPTransport::MessageHandler { + friend class DAPSessionManager; + /// Path to the lldb-dap binary itself. static llvm::StringRef debug_adapter_path; @@ -157,6 +161,11 @@ struct DAP final : public DAPTransport::MessageHandler { /// Whether to disable sourcing .lldbinit files. bool no_lldbinit; + /// Stores whether the initialize request specified a value for + /// lldbExtSourceInitFile. Used by the test suite to prevent sourcing + /// `.lldbinit` and changing its behavior. + bool sourceInitFile = true; + /// The initial thread list upon attaching. std::vector initial_thread_list; @@ -408,9 +417,33 @@ struct DAP final : public DAPTransport::MessageHandler { lldb::SBMutex GetAPIMutex() const { return target.GetAPIMutex(); } + /// Get the client name for this DAP session. + llvm::StringRef GetClientName() const { return m_client_name; } + void StartEventThread(); void StartProgressEventThread(); + /// DAP debugger initialization functions. + /// @{ + + /// Perform complete DAP initialization for a new debugger. + llvm::Error InitializeDebugger(); + + /// Perform complete DAP initialization by reusing an existing debugger and + /// target. + /// + /// \param[in] debugger_id + /// The ID of the existing debugger to reuse. + /// + /// \param[in] target_id + /// The globally unique ID of the existing target to reuse. + llvm::Error InitializeDebugger(int debugger_id, lldb::user_id_t target_id); + + /// Start event handling threads based on client capabilities. + void StartEventThreads(); + + /// @} + /// Sets the given protocol `breakpoints` in the given `source`, while /// removing any existing breakpoints in the given source if they are not in /// `breakpoint`. 
@@ -453,15 +486,11 @@ struct DAP final : public DAPTransport::MessageHandler { /// Event threads. /// @{ - void EventThread(); - void HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited); - void HandleTargetEvent(const lldb::SBEvent &event); - void HandleBreakpointEvent(const lldb::SBEvent &event); - void HandleThreadEvent(const lldb::SBEvent &event); - void HandleDiagnosticEvent(const lldb::SBEvent &event); void ProgressEventThread(); - std::thread event_thread; + /// Event thread is a shared pointer in case we have a multiple + /// DAP instances sharing the same event thread. + std::shared_ptr event_thread_sp; std::thread progress_event_thread; /// @} diff --git a/lldb/tools/lldb-dap/DAPForward.h b/lldb/tools/lldb-dap/DAPForward.h index 6620d5fd33642..e7fbbf669e7ec 100644 --- a/lldb/tools/lldb-dap/DAPForward.h +++ b/lldb/tools/lldb-dap/DAPForward.h @@ -28,6 +28,7 @@ namespace lldb { class SBAttachInfo; class SBBreakpoint; class SBBreakpointLocation; +class SBBroadcaster; class SBCommandInterpreter; class SBCommandReturnObject; class SBCommunication; diff --git a/lldb/tools/lldb-dap/DAPSessionManager.cpp b/lldb/tools/lldb-dap/DAPSessionManager.cpp new file mode 100644 index 0000000000000..d5440ffd64597 --- /dev/null +++ b/lldb/tools/lldb-dap/DAPSessionManager.cpp @@ -0,0 +1,142 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "DAPSessionManager.h" +#include "DAP.h" +#include "EventHelper.h" +#include "lldb/API/SBBroadcaster.h" +#include "lldb/API/SBEvent.h" +#include "lldb/API/SBTarget.h" +#include "lldb/Host/MainLoopBase.h" +#include "llvm/Support/Threading.h" +#include "llvm/Support/WithColor.h" + +#include +#include + +namespace lldb_dap { + +ManagedEventThread::ManagedEventThread(lldb::SBBroadcaster broadcaster, + std::thread t) + : m_broadcaster(broadcaster), m_event_thread(std::move(t)) {} + +ManagedEventThread::~ManagedEventThread() { + if (m_event_thread.joinable()) { + m_broadcaster.BroadcastEventByType(eBroadcastBitStopEventThread); + m_event_thread.join(); + } +} + +DAPSessionManager &DAPSessionManager::GetInstance() { + static std::once_flag initialized; + static DAPSessionManager *instance = + nullptr; // NOTE: intentional leak to avoid issues with C++ destructor + // chain + + std::call_once(initialized, []() { instance = new DAPSessionManager(); }); + + return *instance; +} + +void DAPSessionManager::RegisterSession(lldb_private::MainLoop *loop, + DAP *dap) { + std::lock_guard lock(m_sessions_mutex); + m_active_sessions[loop] = dap; +} + +void DAPSessionManager::UnregisterSession(lldb_private::MainLoop *loop) { + std::unique_lock lock(m_sessions_mutex); + m_active_sessions.erase(loop); + std::notify_all_at_thread_exit(m_sessions_condition, std::move(lock)); +} + +std::vector DAPSessionManager::GetActiveSessions() { + std::lock_guard lock(m_sessions_mutex); + std::vector sessions; + for (const auto &[loop, dap] : m_active_sessions) + if (dap) + sessions.emplace_back(dap); + return sessions; +} + +void DAPSessionManager::DisconnectAllSessions() { + std::lock_guard lock(m_sessions_mutex); + m_client_failed = false; + for (auto [loop, dap] : m_active_sessions) { + if (dap) { + if (llvm::Error error = dap->Disconnect()) { + 
m_client_failed = true; + llvm::WithColor::error() << "DAP client disconnected failed: " + << llvm::toString(std::move(error)) << "\n"; + } + loop->AddPendingCallback( + [](lldb_private::MainLoopBase &loop) { loop.RequestTermination(); }); + } + } +} + +llvm::Error DAPSessionManager::WaitForAllSessionsToDisconnect() { + std::unique_lock lock(m_sessions_mutex); + m_sessions_condition.wait(lock, [this] { return m_active_sessions.empty(); }); + + // Check if any disconnection failed and return appropriate error. + if (m_client_failed) + return llvm::make_error( + "disconnecting all clients failed", llvm::inconvertibleErrorCode()); + + return llvm::Error::success(); +} + +std::shared_ptr +DAPSessionManager::GetEventThreadForDebugger(lldb::SBDebugger debugger, + DAP *requesting_dap) { + lldb::user_id_t debugger_id = debugger.GetID(); + std::lock_guard lock(m_sessions_mutex); + + // Try to use shared event thread, if it exists. + if (auto it = m_debugger_event_threads.find(debugger_id); + it != m_debugger_event_threads.end()) { + if (std::shared_ptr thread_sp = it->second.lock()) + return thread_sp; + // Our weak pointer has expired. + m_debugger_event_threads.erase(it); + } + + // Create a new event thread and store it. 
+  auto new_thread_sp = std::make_shared<ManagedEventThread>(
+      requesting_dap->broadcaster,
+      std::thread(EventThread, debugger, requesting_dap->broadcaster,
+                  requesting_dap->m_client_name, requesting_dap->log));
+  m_debugger_event_threads[debugger_id] = new_thread_sp;
+  return new_thread_sp;
+}
+
+DAP *DAPSessionManager::FindDAPForTarget(lldb::SBTarget target) {
+  std::lock_guard<std::mutex> lock(m_sessions_mutex);
+
+  for (const auto &[loop, dap] : m_active_sessions)
+    if (dap && dap->target.IsValid() && dap->target == target)
+      return dap;
+
+  return nullptr;
+}
+
+void DAPSessionManager::ReleaseExpiredEventThreads() {
+  std::lock_guard<std::mutex> lock(m_sessions_mutex);
+  for (auto it = m_debugger_event_threads.begin();
+       it != m_debugger_event_threads.end();) {
+    // Check if the weak_ptr has expired (no DAP instances are using it
+    // anymore).
+    if (it->second.expired()) {
+      it = m_debugger_event_threads.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/DAPSessionManager.h b/lldb/tools/lldb-dap/DAPSessionManager.h
new file mode 100644
index 0000000000000..ad76b081ad78b
--- /dev/null
+++ b/lldb/tools/lldb-dap/DAPSessionManager.h
@@ -0,0 +1,119 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of the DAPSessionManager and
+/// ManagedEventThread classes, which are used to manage multiple concurrent
+/// DAP sessions in a single lldb-dap process.
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H +#define LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H + +#include "lldb/API/SBBroadcaster.h" +#include "lldb/API/SBDebugger.h" +#include "lldb/API/SBTarget.h" +#include "lldb/Host/MainLoop.h" +#include "lldb/lldb-types.h" +#include "llvm/Support/Error.h" +#include +#include +#include +#include +#include +#include +#include + +namespace lldb_dap { + +// Forward declarations +struct DAP; + +class ManagedEventThread { +public: + // Constructor declaration + ManagedEventThread(lldb::SBBroadcaster broadcaster, std::thread t); + + ~ManagedEventThread(); + + ManagedEventThread(const ManagedEventThread &) = delete; + ManagedEventThread &operator=(const ManagedEventThread &) = delete; + +private: + lldb::SBBroadcaster m_broadcaster; + std::thread m_event_thread; +}; + +/// Global DAP session manager that manages multiple concurrent DAP sessions in +/// a single lldb-dap process. Handles session lifecycle tracking, coordinates +/// shared debugger event threads, and facilitates target handoff between +/// sessions for dynamically created targets. +class DAPSessionManager { +public: + /// Get the singleton instance of the DAP session manager. + static DAPSessionManager &GetInstance(); + + /// Register a DAP session. + void RegisterSession(lldb_private::MainLoop *loop, DAP *dap); + + /// Unregister a DAP session. Called by sessions when they complete their + /// disconnection, which unblocks WaitForAllSessionsToDisconnect(). + void UnregisterSession(lldb_private::MainLoop *loop); + + /// Get all active DAP sessions. + std::vector GetActiveSessions(); + + /// Disconnect all registered sessions by calling Disconnect() on + /// each and requesting their event loops to terminate. Used during + /// shutdown to force all sessions to begin disconnecting. + void DisconnectAllSessions(); + + /// Block until all sessions disconnect and unregister. 
Returns an error if + /// DisconnectAllSessions() was called and any disconnection failed. + llvm::Error WaitForAllSessionsToDisconnect(); + + /// Get or create event thread for a specific debugger. + std::shared_ptr + GetEventThreadForDebugger(lldb::SBDebugger debugger, DAP *requesting_dap); + + /// Find the DAP instance that owns the given target. + DAP *FindDAPForTarget(lldb::SBTarget target); + + /// Static convenience method for FindDAPForTarget. + static DAP *FindDAP(lldb::SBTarget target) { + return GetInstance().FindDAPForTarget(target); + } + + /// Clean up expired event threads from the collection. + void ReleaseExpiredEventThreads(); + +private: + DAPSessionManager() = default; + ~DAPSessionManager() = default; + + // Non-copyable and non-movable. + DAPSessionManager(const DAPSessionManager &) = delete; + DAPSessionManager &operator=(const DAPSessionManager &) = delete; + DAPSessionManager(DAPSessionManager &&) = delete; + DAPSessionManager &operator=(DAPSessionManager &&) = delete; + + bool m_client_failed = false; + std::mutex m_sessions_mutex; + std::condition_variable m_sessions_condition; + std::map m_active_sessions; + + /// Map from debugger ID to its event thread, used when multiple DAP sessions + /// share the same debugger instance. 
+ std::map> + m_debugger_event_threads; +}; + +} // namespace lldb_dap + +#endif // LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H diff --git a/lldb/tools/lldb-dap/EventHelper.cpp b/lldb/tools/lldb-dap/EventHelper.cpp index 12d9e21c52ab3..bdb6bb55fe168 100644 --- a/lldb/tools/lldb-dap/EventHelper.cpp +++ b/lldb/tools/lldb-dap/EventHelper.cpp @@ -7,16 +7,28 @@ //===----------------------------------------------------------------------===// #include "EventHelper.h" +#include "Breakpoint.h" +#include "BreakpointBase.h" #include "DAP.h" #include "DAPError.h" +#include "DAPLog.h" +#include "DAPSessionManager.h" +#include "Handler/ResponseHandler.h" #include "JSONUtils.h" #include "LLDBUtils.h" #include "Protocol/ProtocolEvents.h" #include "Protocol/ProtocolRequests.h" #include "Protocol/ProtocolTypes.h" +#include "ProtocolUtils.h" +#include "lldb/API/SBEvent.h" #include "lldb/API/SBFileSpec.h" +#include "lldb/API/SBListener.h" #include "lldb/API/SBPlatform.h" +#include "lldb/API/SBStream.h" #include "llvm/Support/Error.h" +#include "llvm/Support/FormatVariadic.h" +#include "llvm/Support/Threading.h" +#include #include #if defined(_WIN32) @@ -306,4 +318,312 @@ void SendMemoryEvent(DAP &dap, lldb::SBValue variable) { dap.Send(protocol::Event{"memory", std::move(body)}); } +// Event handler functions that are called by EventThread. +// These handlers extract the necessary objects from events and find the +// appropriate DAP instance to handle them, maintaining compatibility with +// the original DAP::Handle*Event pattern while supporting multi-session +// debugging. + +void HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited, + Log *log) { + lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); + + // Find the DAP instance that owns this process's target. 
+ DAP *dap = DAPSessionManager::FindDAP(process.GetTarget()); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for process {0}", + process.GetProcessID()); + return; + } + + const uint32_t event_mask = event.GetType(); + + if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { + auto state = lldb::SBProcess::GetStateFromEvent(event); + switch (state) { + case lldb::eStateConnected: + case lldb::eStateDetached: + case lldb::eStateInvalid: + case lldb::eStateUnloaded: + break; + case lldb::eStateAttaching: + case lldb::eStateCrashed: + case lldb::eStateLaunching: + case lldb::eStateStopped: + case lldb::eStateSuspended: + // Only report a stopped event if the process was not + // automatically restarted. + if (!lldb::SBProcess::GetRestartedFromEvent(event)) { + SendStdOutStdErr(*dap, process); + if (llvm::Error err = SendThreadStoppedEvent(*dap)) + DAP_LOG_ERROR(dap->log, std::move(err), + "({1}) reporting thread stopped: {0}", + dap->GetClientName()); + } + break; + case lldb::eStateRunning: + case lldb::eStateStepping: + dap->WillContinue(); + SendContinuedEvent(*dap); + break; + case lldb::eStateExited: + lldb::SBStream stream; + process.GetStatus(stream); + dap->SendOutput(OutputType::Console, stream.GetData()); + + // When restarting, we can get an "exited" event for the process we + // just killed with the old PID, or even with no PID. In that case + // we don't have to terminate the session. 
+ if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || + process.GetProcessID() == dap->restarting_process_id) { + dap->restarting_process_id = LLDB_INVALID_PROCESS_ID; + } else { + // Run any exit LLDB commands the user specified in the + // launch.json + dap->RunExitCommands(); + SendProcessExitedEvent(*dap, process); + dap->SendTerminatedEvent(); + process_exited = true; + } + break; + } + } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || + (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { + SendStdOutStdErr(*dap, process); + } +} + +void HandleTargetEvent(const lldb::SBEvent &event, Log *log) { + lldb::SBTarget target = lldb::SBTarget::GetTargetFromEvent(event); + + // Find the DAP instance that owns this target. + DAP *dap = DAPSessionManager::FindDAP(target); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for target"); + return; + } + + const uint32_t event_mask = event.GetType(); + if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { + const uint32_t num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); + const bool remove_module = + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; + + // NOTE: Both mutexes must be acquired to prevent deadlock when + // handling `modules_request`, which also requires both locks. 
+ lldb::SBMutex api_mutex = dap->GetAPIMutex(); + const std::scoped_lock guard(api_mutex, + dap->modules_mutex); + for (uint32_t i = 0; i < num_modules; ++i) { + lldb::SBModule module = + lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); + + std::optional p_module = + CreateModule(dap->target, module, remove_module); + if (!p_module) + continue; + + llvm::StringRef module_id = p_module->id; + + const bool module_exists = dap->modules.contains(module_id); + if (remove_module && module_exists) { + dap->modules.erase(module_id); + dap->Send(protocol::Event{ + "module", protocol::ModuleEventBody{ + std::move(p_module).value(), + protocol::ModuleEventBody::eReasonRemoved}}); + } else if (module_exists) { + dap->Send(protocol::Event{ + "module", protocol::ModuleEventBody{ + std::move(p_module).value(), + protocol::ModuleEventBody::eReasonChanged}}); + } else if (!remove_module) { + dap->modules.insert(module_id); + dap->Send(protocol::Event{ + "module", + protocol::ModuleEventBody{std::move(p_module).value(), + protocol::ModuleEventBody::eReasonNew}}); + } + } + } else if (event_mask & lldb::SBTarget::eBroadcastBitNewTargetCreated) { + // For NewTargetCreated events, GetTargetFromEvent returns the parent + // target, and GetCreatedTargetFromEvent returns the newly created target. + lldb::SBTarget created_target = + lldb::SBTarget::GetCreatedTargetFromEvent(event); + + if (!target.IsValid() || !created_target.IsValid()) { + DAP_LOG(log, "Received NewTargetCreated event but parent or " + "created target is invalid"); + return; + } + + // Send a startDebugging reverse request with the debugger and target + // IDs. The new DAP instance will use these IDs to find the existing + // debugger and target via FindDebuggerWithID and + // FindTargetByGloballyUniqueID. 
+ llvm::json::Object configuration; + configuration.try_emplace("type", "lldb"); + configuration.try_emplace("debuggerId", + created_target.GetDebugger().GetID()); + configuration.try_emplace("targetId", created_target.GetGloballyUniqueID()); + configuration.try_emplace("name", created_target.GetTargetSessionName()); + + llvm::json::Object request; + request.try_emplace("request", "attach"); + request.try_emplace("configuration", std::move(configuration)); + + dap->SendReverseRequest("startDebugging", + std::move(request)); + } +} + +void HandleBreakpointEvent(const lldb::SBEvent &event, Log *log) { + const uint32_t event_mask = event.GetType(); + if (!(event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged)) + return; + + lldb::SBBreakpoint bp = lldb::SBBreakpoint::GetBreakpointFromEvent(event); + if (!bp.IsValid()) + return; + + // Find the DAP instance that owns this breakpoint's target. + DAP *dap = DAPSessionManager::FindDAP(bp.GetTarget()); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for breakpoint"); + return; + } + + auto event_type = lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); + auto breakpoint = Breakpoint(*dap, bp); + // If the breakpoint was set through DAP, it will have the + // BreakpointBase::kDAPBreakpointLabel. Regardless of whether + // locations were added, removed, or resolved, the breakpoint isn't + // going away and the reason is always "changed". + if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || + event_type & lldb::eBreakpointEventTypeLocationsRemoved || + event_type & lldb::eBreakpointEventTypeLocationsResolved) && + breakpoint.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { + // As the DAP client already knows the path of this breakpoint, we + // don't need to send it back as part of the "changed" event. This + // avoids sending paths that should be source mapped. 
Note that + // CreateBreakpoint doesn't apply source mapping and certain + // implementation ignore the source part of this event anyway. + protocol::Breakpoint protocol_bp = breakpoint.ToProtocolBreakpoint(); + + // "source" is not needed here, unless we add adapter data to be + // saved by the client. + if (protocol_bp.source && !protocol_bp.source->adapterData) + protocol_bp.source = std::nullopt; + + llvm::json::Object body; + body.try_emplace("breakpoint", protocol_bp); + body.try_emplace("reason", "changed"); + + llvm::json::Object bp_event = CreateEventObject("breakpoint"); + bp_event.try_emplace("body", std::move(body)); + + dap->SendJSON(llvm::json::Value(std::move(bp_event))); + } +} + +void HandleThreadEvent(const lldb::SBEvent &event, Log *log) { + uint32_t event_type = event.GetType(); + + if (!(event_type & lldb::SBThread::eBroadcastBitStackChanged)) + return; + + lldb::SBThread thread = lldb::SBThread::GetThreadFromEvent(event); + if (!thread.IsValid()) + return; + + // Find the DAP instance that owns this thread's process/target. + DAP *dap = DAPSessionManager::FindDAP(thread.GetProcess().GetTarget()); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for thread"); + return; + } + + SendInvalidatedEvent(*dap, {protocol::InvalidatedEventBody::eAreaStacks}, + thread.GetThreadID()); +} + +void HandleDiagnosticEvent(const lldb::SBEvent &event, Log *log) { + // Global debugger events - send to all DAP instances. 
+ std::vector active_instances = + DAPSessionManager::GetInstance().GetActiveSessions(); + for (DAP *dap_instance : active_instances) { + if (!dap_instance) + continue; + + lldb::SBStructuredData data = + lldb::SBDebugger::GetDiagnosticFromEvent(event); + if (!data.IsValid()) + continue; + + std::string type = GetStringValue(data.GetValueForKey("type")); + std::string message = GetStringValue(data.GetValueForKey("message")); + dap_instance->SendOutput(OutputType::Important, + llvm::formatv("{0}: {1}", type, message).str()); + } +} + +// Note: EventThread() is architecturally different from the other functions in +// this file. While the functions above are event helpers that operate on a +// single DAP instance (taking `DAP &dap` as a parameter), EventThread() is a +// shared event processing loop that: +// 1. Listens to events from a shared debugger instance +// 2. Dispatches events to the appropriate handler, which internally finds the +// DAP instance using DAPSessionManager::FindDAP() +// 3. Handles events for multiple different DAP sessions +// This allows multiple DAP sessions to share a single debugger and event +// thread, which is essential for the target handoff mechanism where child +// processes/targets are debugged in separate DAP sessions. +// +// All events from the debugger, target, process, thread and frames are +// received in this function that runs in its own thread. We are using a +// "FILE *" to output packets back to VS Code and they have mutexes in them +// them prevent multiple threads from writing simultaneously so no locking +// is required. +void EventThread(lldb::SBDebugger debugger, lldb::SBBroadcaster broadcaster, + llvm::StringRef client_name, Log *log) { + llvm::set_thread_name("lldb.DAP.client." 
+ client_name + ".event_handler"); + lldb::SBListener listener = debugger.GetListener(); + broadcaster.AddListener(listener, eBroadcastBitStopEventThread); + debugger.GetBroadcaster().AddListener( + listener, lldb::eBroadcastBitError | lldb::eBroadcastBitWarning); + + // listen for thread events. + listener.StartListeningForEventClass( + debugger, lldb::SBThread::GetBroadcasterClassName(), + lldb::SBThread::eBroadcastBitStackChanged); + + lldb::SBEvent event; + bool done = false; + while (!done) { + if (!listener.WaitForEvent(UINT32_MAX, event)) + continue; + + const uint32_t event_mask = event.GetType(); + if (lldb::SBProcess::EventIsProcessEvent(event)) { + HandleProcessEvent(event, /*&process_exited=*/done, log); + } else if (lldb::SBTarget::EventIsTargetEvent(event)) { + HandleTargetEvent(event, log); + } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { + HandleBreakpointEvent(event, log); + } else if (lldb::SBThread::EventIsThreadEvent(event)) { + HandleThreadEvent(event, log); + } else if (event_mask & lldb::eBroadcastBitError || + event_mask & lldb::eBroadcastBitWarning) { + HandleDiagnosticEvent(event, log); + } else if (event.BroadcasterMatchesRef(broadcaster)) { + if (event_mask & eBroadcastBitStopEventThread) { + done = true; + } + } + } +} + } // namespace lldb_dap diff --git a/lldb/tools/lldb-dap/EventHelper.h b/lldb/tools/lldb-dap/EventHelper.h index be783d032a5ae..3beba2629b2e3 100644 --- a/lldb/tools/lldb-dap/EventHelper.h +++ b/lldb/tools/lldb-dap/EventHelper.h @@ -42,6 +42,26 @@ void SendInvalidatedEvent( void SendMemoryEvent(DAP &dap, lldb::SBValue variable); +/// Event thread function that handles debugger events for multiple DAP sessions +/// sharing the same debugger instance. This runs in its own thread and +/// dispatches events to the appropriate DAP instance. +/// +/// \param debugger The debugger instance to listen for events from. +/// \param broadcaster The broadcaster for stop event thread notifications. 
+/// \param client_name The client name for thread naming/logging purposes. +/// \param log The log instance for logging. +void EventThread(lldb::SBDebugger debugger, lldb::SBBroadcaster broadcaster, + llvm::StringRef client_name, Log *log); + +/// Event handler functions called by EventThread. +/// These handlers extract the necessary objects from events and find the +/// appropriate DAP instance to handle them. +void HandleProcessEvent(const lldb::SBEvent &event, bool &done, Log *log); +void HandleTargetEvent(const lldb::SBEvent &event, Log *log); +void HandleBreakpointEvent(const lldb::SBEvent &event, Log *log); +void HandleThreadEvent(const lldb::SBEvent &event, Log *log); +void HandleDiagnosticEvent(const lldb::SBEvent &event, Log *log); + } // namespace lldb_dap #endif diff --git a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp index 490513fe8a0b8..24c0ca2111f40 100644 --- a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp @@ -17,6 +17,7 @@ #include "lldb/lldb-defines.h" #include "llvm/Support/Error.h" #include "llvm/Support/FileSystem.h" +#include using namespace llvm; using namespace lldb_dap::protocol; @@ -29,14 +30,31 @@ namespace lldb_dap { /// Since attaching is debugger/runtime specific, the arguments for this request /// are not part of this specification. Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { + // Initialize DAP debugger and related components if not sharing previously + // launched debugger. + std::optional debugger_id = args.debuggerId; + std::optional target_id = args.targetId; + + // Validate that both debugger_id and target_id are provided together. 
+ if (debugger_id.has_value() != target_id.has_value()) { + return llvm::createStringError( + "Both debuggerId and targetId must be specified together for debugger " + "reuse, or both must be omitted to create a new debugger"); + } + + if (Error err = debugger_id && target_id + ? dap.InitializeDebugger(*debugger_id, *target_id) + : dap.InitializeDebugger()) + return err; + // Validate that we have a well formed attach request. if (args.attachCommands.empty() && args.coreFile.empty() && args.configuration.program.empty() && args.pid == LLDB_INVALID_PROCESS_ID && - args.gdbRemotePort == LLDB_DAP_INVALID_PORT) + args.gdbRemotePort == LLDB_DAP_INVALID_PORT && !target_id.has_value()) return make_error( "expected one of 'pid', 'program', 'attachCommands', " - "'coreFile' or 'gdb-remote-port' to be specified"); + "'coreFile', 'gdb-remote-port', or target_id to be specified"); // Check if we have mutually exclusive arguments. if ((args.pid != LLDB_INVALID_PROCESS_ID) && @@ -64,7 +82,18 @@ Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { dap.ConfigureSourceMaps(); lldb::SBError error; - lldb::SBTarget target = dap.CreateTarget(error); + lldb::SBTarget target; + if (target_id) { + // Use the unique target ID to get the target. + target = dap.debugger.FindTargetByGloballyUniqueID(*target_id); + if (!target.IsValid()) { + error.SetErrorStringWithFormat("invalid target_id %lu in attach config", + *target_id); + } + } else { + target = dap.CreateTarget(error); + } + if (error.Fail()) return ToError(error); @@ -114,7 +143,7 @@ Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { connect_url += std::to_string(args.gdbRemotePort); dap.target.ConnectRemote(listener, connect_url.c_str(), "gdb-remote", error); - } else { + } else if (!target_id.has_value()) { // Attach by pid or process name. 
lldb::SBAttachInfo attach_info; if (args.pid != LLDB_INVALID_PROCESS_ID) diff --git a/lldb/tools/lldb-dap/Handler/DataBreakpointInfoRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/DataBreakpointInfoRequestHandler.cpp index 87b93fc999ecd..245d92c18e59e 100644 --- a/lldb/tools/lldb-dap/Handler/DataBreakpointInfoRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/DataBreakpointInfoRequestHandler.cpp @@ -7,15 +7,33 @@ //===----------------------------------------------------------------------===// #include "DAP.h" +#include "DAPError.h" #include "EventHelper.h" #include "Protocol/ProtocolTypes.h" #include "RequestHandler.h" +#include "lldb/API/SBAddress.h" #include "lldb/API/SBMemoryRegionInfo.h" #include "llvm/ADT/StringExtras.h" #include namespace lldb_dap { +static bool IsRW(DAP &dap, lldb::addr_t load_addr) { + if (!lldb::SBAddress(load_addr, dap.target).IsValid()) + return false; + lldb::SBMemoryRegionInfo region; + lldb::SBError err = + dap.target.GetProcess().GetMemoryRegionInfo(load_addr, region); + // Only lldb-server supports "qMemoryRegionInfo". So, don't fail this + // request if SBProcess::GetMemoryRegionInfo returns error. + if (err.Success()) { + if (!(region.IsReadable() || region.IsWritable())) { + return false; + } + } + return true; +} + /// Obtains information on a possible data breakpoint that could be set on an /// expression or variable. Clients should only call this request if the /// corresponding capability supportsDataBreakpoints is true. 
@@ -23,7 +41,6 @@ llvm::Expected DataBreakpointInfoRequestHandler::Run( const protocol::DataBreakpointInfoArguments &args) const { protocol::DataBreakpointInfoResponseBody response; - lldb::SBFrame frame = dap.GetLLDBFrame(args.frameId); lldb::SBValue variable = dap.variables.FindVariable( args.variablesReference.value_or(0), args.name); std::string addr, size; @@ -43,7 +60,8 @@ DataBreakpointInfoRequestHandler::Run( addr = llvm::utohexstr(load_addr); size = llvm::utostr(byte_size); } - } else if (args.variablesReference.value_or(0) == 0 && frame.IsValid()) { + } else if (lldb::SBFrame frame = dap.GetLLDBFrame(args.frameId); + args.variablesReference.value_or(0) == 0 && frame.IsValid()) { lldb::SBValue value = frame.EvaluateExpression(args.name.c_str()); if (value.GetError().Fail()) { lldb::SBError error = value.GetError(); @@ -58,17 +76,10 @@ DataBreakpointInfoRequestHandler::Run( if (data.IsValid()) { size = llvm::utostr(data.GetByteSize()); addr = llvm::utohexstr(load_addr); - lldb::SBMemoryRegionInfo region; - lldb::SBError err = - dap.target.GetProcess().GetMemoryRegionInfo(load_addr, region); - // Only lldb-server supports "qMemoryRegionInfo". So, don't fail this - // request if SBProcess::GetMemoryRegionInfo returns error. 
- if (err.Success()) { - if (!(region.IsReadable() || region.IsWritable())) { - is_data_ok = false; - response.description = "memory region for address " + addr + - " has no read or write permissions"; - } + if (!IsRW(dap, load_addr)) { + is_data_ok = false; + response.description = "memory region for address " + addr + + " has no read or write permissions"; } } else { is_data_ok = false; @@ -76,6 +87,17 @@ DataBreakpointInfoRequestHandler::Run( "unable to get byte size for expression: " + args.name; } } + } else if (args.asAddress) { + size = llvm::utostr(args.bytes.value_or(dap.target.GetAddressByteSize())); + lldb::addr_t load_addr = LLDB_INVALID_ADDRESS; + if (llvm::StringRef(args.name).getAsInteger(0, load_addr)) + return llvm::make_error(args.name + " is not a valid address", + llvm::inconvertibleErrorCode(), false); + addr = llvm::utohexstr(load_addr); + if (!IsRW(dap, load_addr)) + return llvm::make_error("memory region for address " + addr + + " has no read or write permissions", + llvm::inconvertibleErrorCode(), false); } else { is_data_ok = false; response.description = "variable not found: " + args.name; @@ -86,7 +108,10 @@ DataBreakpointInfoRequestHandler::Run( response.accessTypes = {protocol::eDataBreakpointAccessTypeRead, protocol::eDataBreakpointAccessTypeWrite, protocol::eDataBreakpointAccessTypeReadWrite}; - response.description = size + " bytes at " + addr + " " + args.name; + if (args.asAddress) + response.description = size + " bytes at " + addr; + else + response.description = size + " bytes at " + addr + " " + args.name; } return response; diff --git a/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp index 9069de4a3a690..53e1810a5b0e0 100644 --- a/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp @@ -21,63 +21,9 @@ using namespace lldb_dap::protocol; /// Initialize request; value of command field is 
'initialize'. llvm::Expected InitializeRequestHandler::Run( const InitializeRequestArguments &arguments) const { + // Store initialization arguments for later use in Launch/Attach. dap.clientFeatures = arguments.supportedFeatures; - - // Do not source init files until in/out/err are configured. - dap.debugger = lldb::SBDebugger::Create(false); - dap.debugger.SetInputFile(dap.in); - dap.target = dap.debugger.GetDummyTarget(); - - llvm::Expected out_fd = dap.out.GetWriteFileDescriptor(); - if (!out_fd) - return out_fd.takeError(); - dap.debugger.SetOutputFile(lldb::SBFile(*out_fd, "w", false)); - - llvm::Expected err_fd = dap.err.GetWriteFileDescriptor(); - if (!err_fd) - return err_fd.takeError(); - dap.debugger.SetErrorFile(lldb::SBFile(*err_fd, "w", false)); - - auto interp = dap.debugger.GetCommandInterpreter(); - - // The sourceInitFile option is not part of the DAP specification. It is an - // extension used by the test suite to prevent sourcing `.lldbinit` and - // changing its behavior. The CLI flag --no-lldbinit takes precedence over - // the DAP parameter. 
- bool should_source_init_files = - !dap.no_lldbinit && arguments.lldbExtSourceInitFile.value_or(true); - if (should_source_init_files) { - dap.debugger.SkipLLDBInitFiles(false); - dap.debugger.SkipAppInitFiles(false); - lldb::SBCommandReturnObject init; - interp.SourceInitFileInGlobalDirectory(init); - interp.SourceInitFileInHomeDirectory(init); - } - - if (llvm::Error err = dap.RunPreInitCommands()) - return err; - - auto cmd = dap.debugger.GetCommandInterpreter().AddMultiwordCommand( - "lldb-dap", "Commands for managing lldb-dap."); - if (arguments.supportedFeatures.contains( - eClientFeatureStartDebuggingRequest)) { - cmd.AddCommand( - "start-debugging", new StartDebuggingCommand(dap), - "Sends a startDebugging request from the debug adapter to the client " - "to start a child debug session of the same type as the caller."); - } - cmd.AddCommand( - "repl-mode", new ReplModeCommand(dap), - "Get or set the repl behavior of lldb-dap evaluation requests."); - cmd.AddCommand("send-event", new SendEventCommand(dap), - "Sends an DAP event to the client."); - - if (arguments.supportedFeatures.contains(eClientFeatureProgressReporting)) - dap.StartProgressEventThread(); - - // Start our event thread so we can receive events from the debugger, target, - // process and more. - dap.StartEventThread(); + dap.sourceInitFile = arguments.lldbExtSourceInitFile.value_or(true); return dap.GetCapabilities(); } diff --git a/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp index 553cbeaf849e2..329f0a7bf6453 100644 --- a/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp @@ -22,6 +22,10 @@ namespace lldb_dap { /// Launch request; value of command field is 'launch'. Error LaunchRequestHandler::Run(const LaunchRequestArguments &arguments) const { + // Initialize DAP debugger. 
+ if (Error err = dap.InitializeDebugger()) + return err; + // Validate that we have a well formed launch request. if (!arguments.launchCommands.empty() && arguments.console != protocol::eConsoleInternal) diff --git a/lldb/tools/lldb-dap/Handler/RequestHandler.h b/lldb/tools/lldb-dap/Handler/RequestHandler.h index 65a52075ebd79..5d235352b7738 100644 --- a/lldb/tools/lldb-dap/Handler/RequestHandler.h +++ b/lldb/tools/lldb-dap/Handler/RequestHandler.h @@ -435,6 +435,9 @@ class DataBreakpointInfoRequestHandler public: using RequestHandler::RequestHandler; static llvm::StringLiteral GetCommand() { return "dataBreakpointInfo"; } + FeatureSet GetSupportedFeatures() const override { + return {protocol::eAdapterFeatureDataBreakpointBytes}; + } llvm::Expected Run(const protocol::DataBreakpointInfoArguments &args) const override; }; diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp index 81eadae03bb48..5c4afa3fd2f62 100644 --- a/lldb/tools/lldb-dap/JSONUtils.cpp +++ b/lldb/tools/lldb-dap/JSONUtils.cpp @@ -677,7 +677,14 @@ llvm::json::Value CreateThreadStopped(DAP &dap, lldb::SBThread &thread, EmplaceSafeString(body, "description", desc_str); } } break; - case lldb::eStopReasonWatchpoint: + case lldb::eStopReasonWatchpoint: { + body.try_emplace("reason", "data breakpoint"); + lldb::break_id_t bp_id = thread.GetStopReasonDataAtIndex(0); + body.try_emplace("hitBreakpointIds", + llvm::json::Array{llvm::json::Value(bp_id)}); + EmplaceSafeString(body, "description", + llvm::formatv("data breakpoint {0}", bp_id).str()); + } break; case lldb::eStopReasonInstrumentation: body.try_emplace("reason", "breakpoint"); break; diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp index ac01cfb95dd41..d53a520ade39b 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp @@ -317,7 +317,9 @@ bool fromJSON(const json::Value &Params, 
AttachRequestArguments &ARA, O.mapOptional("waitFor", ARA.waitFor) && O.mapOptional("gdb-remote-port", ARA.gdbRemotePort) && O.mapOptional("gdb-remote-hostname", ARA.gdbRemoteHostname) && - O.mapOptional("coreFile", ARA.coreFile); + O.mapOptional("coreFile", ARA.coreFile) && + O.mapOptional("targetId", ARA.targetId) && + O.mapOptional("debuggerId", ARA.debuggerId); } bool fromJSON(const json::Value &Params, ContinueArguments &CA, json::Path P) { diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h index c1e1e93f1e44a..37fc2465f6a05 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h @@ -350,6 +350,12 @@ struct AttachRequestArguments { /// Path to the core file to debug. std::string coreFile; + /// Unique ID of an existing target to attach to. + std::optional targetId; + + /// ID of an existing debugger instance to use. + std::optional debuggerId; + /// @} }; bool fromJSON(const llvm::json::Value &, AttachRequestArguments &, diff --git a/lldb/tools/lldb-dap/Watchpoint.cpp b/lldb/tools/lldb-dap/Watchpoint.cpp index 0acc980890be8..e730e71c0dc31 100644 --- a/lldb/tools/lldb-dap/Watchpoint.cpp +++ b/lldb/tools/lldb-dap/Watchpoint.cpp @@ -45,6 +45,7 @@ protocol::Breakpoint Watchpoint::ToProtocolBreakpoint() { breakpoint.message = m_error.GetCString(); } else { breakpoint.verified = true; + breakpoint.id = m_wp.GetID(); } return breakpoint; diff --git a/lldb/tools/lldb-dap/package.json b/lldb/tools/lldb-dap/package.json index 05dce285dd592..8e07c550b88c3 100644 --- a/lldb/tools/lldb-dap/package.json +++ b/lldb/tools/lldb-dap/package.json @@ -778,6 +778,10 @@ "description": "Custom commands that are executed instead of attaching to a process ID or to a process by name. These commands may optionally create a new target and must perform an attach. 
A valid process must exist after these commands complete or the \"attach\" will fail.", "default": [] }, + "targetId": { + "type": "number", + "description": "The globally unique target id to attach to. Used when a target is dynamically created." + }, "initCommands": { "type": "array", "items": { diff --git a/lldb/tools/lldb-dap/tool/lldb-dap.cpp b/lldb/tools/lldb-dap/tool/lldb-dap.cpp index f10ed12344cbd..27516b2a25678 100644 --- a/lldb/tools/lldb-dap/tool/lldb-dap.cpp +++ b/lldb/tools/lldb-dap/tool/lldb-dap.cpp @@ -445,12 +445,8 @@ static llvm::Error serveConnection( g_connection_timeout_time_point, connection_timeout_seconds.value()); std::condition_variable dap_sessions_condition; - std::mutex dap_sessions_mutex; - std::map dap_sessions; unsigned int clientCount = 0; - auto handle = listener->Accept(g_loop, [=, &dap_sessions_condition, - &dap_sessions_mutex, &dap_sessions, - &clientCount]( + auto handle = listener->Accept(g_loop, [=, &clientCount]( std::unique_ptr sock) { // Reset the keep alive timer, because we won't be killing the server // while this connection is being served. @@ -464,8 +460,7 @@ static llvm::Error serveConnection( // Move the client into a background thread to unblock accepting the next // client. - std::thread client([=, &dap_sessions_condition, &dap_sessions_mutex, - &dap_sessions]() { + std::thread client([=]() { llvm::set_thread_name(client_name + ".runloop"); MainLoop loop; Transport transport(client_name, log, io, io); @@ -478,10 +473,8 @@ static llvm::Error serveConnection( return; } - { - std::scoped_lock lock(dap_sessions_mutex); - dap_sessions[&loop] = &dap; - } + // Register the DAP session with the global manager. 
+ DAPSessionManager::GetInstance().RegisterSession(&loop, &dap); if (auto Err = dap.Loop()) { llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), @@ -490,10 +483,8 @@ static llvm::Error serveConnection( } DAP_LOG(log, "({0}) client disconnected", client_name); - std::unique_lock lock(dap_sessions_mutex); - dap_sessions.erase(&loop); - std::notify_all_at_thread_exit(dap_sessions_condition, std::move(lock)); - + // Unregister the DAP session from the global manager. + DAPSessionManager::GetInstance().UnregisterSession(&loop); // Start the countdown to kill the server at the end of each connection. if (connection_timeout_seconds) TrackConnectionTimeout(g_loop, g_connection_timeout_mutex, @@ -516,29 +507,11 @@ static llvm::Error serveConnection( log, "lldb-dap server shutdown requested, disconnecting remaining clients..."); - bool client_failed = false; - { - std::scoped_lock lock(dap_sessions_mutex); - for (auto [loop, dap] : dap_sessions) { - if (llvm::Error error = dap->Disconnect()) { - client_failed = true; - llvm::WithColor::error() << "DAP client disconnected failed: " - << llvm::toString(std::move(error)) << "\n"; - } - loop->AddPendingCallback( - [](MainLoopBase &loop) { loop.RequestTermination(); }); - } - } - - // Wait for all clients to finish disconnecting. - std::unique_lock lock(dap_sessions_mutex); - dap_sessions_condition.wait(lock, [&] { return dap_sessions.empty(); }); - - if (client_failed) - return llvm::make_error( - "disconnecting all clients failed", llvm::inconvertibleErrorCode()); + // Disconnect all active sessions using the global manager. + DAPSessionManager::GetInstance().DisconnectAllSessions(); - return llvm::Error::success(); + // Wait for all clients to finish disconnecting and return any errors. 
+ return DAPSessionManager::GetInstance().WaitForAllSessionsToDisconnect(); } int main(int argc, char *argv[]) { @@ -775,6 +748,10 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } + // Register the DAP session with the global manager for stdio mode. + // This is needed for the event handling to find the correct DAP instance. + DAPSessionManager::GetInstance().RegisterSession(&loop, &dap); + // used only by TestVSCode_redirection_to_console.py if (getenv("LLDB_DAP_TEST_STDOUT_STDERR_REDIRECTION") != nullptr) redirection_test(); @@ -784,7 +761,9 @@ int main(int argc, char *argv[]) { llvm::toStringWithoutConsuming(Err)); llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "DAP session error: "); + DAPSessionManager::GetInstance().UnregisterSession(&loop); return EXIT_FAILURE; } + DAPSessionManager::GetInstance().UnregisterSession(&loop); return EXIT_SUCCESS; } diff --git a/lldb/unittests/DAP/CMakeLists.txt b/lldb/unittests/DAP/CMakeLists.txt index a478cf07eedb2..0f8e9db2fab31 100644 --- a/lldb/unittests/DAP/CMakeLists.txt +++ b/lldb/unittests/DAP/CMakeLists.txt @@ -1,6 +1,7 @@ add_lldb_unittest(DAPTests ClientLauncherTest.cpp DAPErrorTest.cpp + DAPSessionManagerTest.cpp DAPTest.cpp DAPTypesTest.cpp FifoFilesTest.cpp diff --git a/lldb/unittests/DAP/DAPSessionManagerTest.cpp b/lldb/unittests/DAP/DAPSessionManagerTest.cpp new file mode 100644 index 0000000000000..b840d31ef116d --- /dev/null +++ b/lldb/unittests/DAP/DAPSessionManagerTest.cpp @@ -0,0 +1,103 @@ +//===-- DAPSessionManagerTest.cpp ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "DAPSessionManager.h" +#include "TestBase.h" +#include "lldb/API/SBDebugger.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace lldb_dap; +using namespace lldb; +using namespace lldb_dap_tests; + +class DAPSessionManagerTest : public DAPTestBase {}; + +TEST_F(DAPSessionManagerTest, GetInstanceReturnsSameSingleton) { + DAPSessionManager &instance1 = DAPSessionManager::GetInstance(); + DAPSessionManager &instance2 = DAPSessionManager::GetInstance(); + + EXPECT_EQ(&instance1, &instance2); +} + +// UnregisterSession uses std::notify_all_at_thread_exit, so it must be called +// from a separate thread to properly release the mutex on thread exit. +TEST_F(DAPSessionManagerTest, RegisterAndUnregisterSession) { + DAPSessionManager &manager = DAPSessionManager::GetInstance(); + + // Initially not registered. + std::vector sessions_before = manager.GetActiveSessions(); + EXPECT_EQ( + std::count(sessions_before.begin(), sessions_before.end(), dap.get()), 0); + + manager.RegisterSession(&loop, dap.get()); + + // Should be in active sessions after registration. + std::vector sessions_after = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions_after.begin(), sessions_after.end(), dap.get()), + 1); + + // Unregister. + std::thread unregister_thread([&]() { manager.UnregisterSession(&loop); }); + + unregister_thread.join(); + + // There should no longer be active sessions. 
+ std::vector sessions_final = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions_final.begin(), sessions_final.end(), dap.get()), + 0); +} + +TEST_F(DAPSessionManagerTest, DisconnectAllSessions) { + DAPSessionManager &manager = DAPSessionManager::GetInstance(); + + manager.RegisterSession(&loop, dap.get()); + + std::vector sessions = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1); + + manager.DisconnectAllSessions(); + + // DisconnectAllSessions shutdown but doesn't wait for + // sessions to complete or remove them from the active sessions map. + sessions = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1); + + std::thread unregister_thread([&]() { manager.UnregisterSession(&loop); }); + unregister_thread.join(); +} + +TEST_F(DAPSessionManagerTest, WaitForAllSessionsToDisconnect) { + DAPSessionManager &manager = DAPSessionManager::GetInstance(); + + manager.RegisterSession(&loop, dap.get()); + + std::vector sessions = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1); + + // Unregister after a delay to test blocking behavior. + std::thread unregister_thread([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + manager.UnregisterSession(&loop); + }); + + // WaitForAllSessionsToDisconnect should block until unregistered. + auto start = std::chrono::steady_clock::now(); + llvm::Error err = manager.WaitForAllSessionsToDisconnect(); + EXPECT_FALSE(err); + auto duration = std::chrono::steady_clock::now() - start; + + // Verify it waited at least 100ms. + EXPECT_GE(duration, std::chrono::milliseconds(100)); + + // Session should be unregistered now. 
+ sessions = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 0); + + unregister_thread.join(); +} diff --git a/lldb/unittests/Expression/ClangParserTest.cpp b/lldb/unittests/Expression/ClangParserTest.cpp index fab4487c73719..c949026e87cd8 100644 --- a/lldb/unittests/Expression/ClangParserTest.cpp +++ b/lldb/unittests/Expression/ClangParserTest.cpp @@ -8,7 +8,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "Plugins/ExpressionParser/Clang/ClangHost.h" #include "TestingSupport/SubsystemRAII.h" @@ -43,7 +43,7 @@ TEST_F(ClangHostTest, ComputeClangResourceDirectory) { std::string path_to_liblldb = "C:\\foo\\bar\\lib\\"; #endif std::string path_to_clang_dir = - clang::driver::Driver::GetResourcesPath(path_to_liblldb + "liblldb"); + clang::GetResourcesPath(path_to_liblldb + "liblldb"); llvm::SmallString<256> path_to_clang_lib_dir_real; llvm::sys::fs::real_path(path_to_clang_dir, path_to_clang_lib_dir_real); diff --git a/lldb/unittests/Language/CPlusPlus/CPlusPlusLanguageTest.cpp b/lldb/unittests/Language/CPlusPlus/CPlusPlusLanguageTest.cpp index 23f2f4218601a..c05418168e62e 100644 --- a/lldb/unittests/Language/CPlusPlus/CPlusPlusLanguageTest.cpp +++ b/lldb/unittests/Language/CPlusPlus/CPlusPlusLanguageTest.cpp @@ -30,6 +30,10 @@ TEST(CPlusPlusLanguage, MethodNameParsing) { {"foo::~bar(baz)", "", "foo", "~bar", "(baz)", "", "foo::~bar"}, {"a::b::c::d(e,f)", "", "a::b::c", "d", "(e,f)", "", "a::b::c::d"}, {"void f(int)", "void", "", "f", "(int)", "", "f"}, + {"std::vectorfoo::bar()", "std::vector", "foo", "bar", "()", "", + "foo::bar"}, + {"int foo::bar::func01(int a, double b)", "int", "foo::bar", "func01", + "(int a, double b)", "", "foo::bar::func01"}, // Operators {"std::basic_ostream >& " @@ -101,6 +105,8 @@ TEST(CPlusPlusLanguage, MethodNameParsing) { "std::forward"}, // Templates + {"vector foo::bar::func(int)", 
"vector", "foo::bar", "func", + "(int)", "", "foo::bar::func"}, {"void llvm::PM>::" "addPass(llvm::VP)", "void", "llvm::PM>", diff --git a/lldb/unittests/Process/gdb-remote/GDBRemoteCommunicationClientTest.cpp b/lldb/unittests/Process/gdb-remote/GDBRemoteCommunicationClientTest.cpp index 012eae02d5857..966b37e09ee55 100644 --- a/lldb/unittests/Process/gdb-remote/GDBRemoteCommunicationClientTest.cpp +++ b/lldb/unittests/Process/gdb-remote/GDBRemoteCommunicationClientTest.cpp @@ -326,7 +326,7 @@ TEST_F(GDBRemoteCommunicationClientTest, SendSignalsToIgnore) { TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfo) { const lldb::addr_t addr = 0xa000; - MemoryRegionInfo region_info; + lldb_private::MemoryRegionInfo region_info; std::future result = std::async(std::launch::async, [&] { return client.GetMemoryRegionInfo(addr, region_info); }); @@ -343,13 +343,16 @@ TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfo) { EXPECT_TRUE(result.get().Success()); EXPECT_EQ(addr, region_info.GetRange().GetRangeBase()); EXPECT_EQ(0x2000u, region_info.GetRange().GetByteSize()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.GetReadable()); - EXPECT_EQ(MemoryRegionInfo::eNo, region_info.GetWritable()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.GetExecutable()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, region_info.GetReadable()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eNo, region_info.GetWritable()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, region_info.GetExecutable()); EXPECT_EQ("/foo/bar.so", region_info.GetName().GetStringRef()); - EXPECT_EQ(MemoryRegionInfo::eDontKnow, region_info.GetMemoryTagged()); - EXPECT_EQ(MemoryRegionInfo::eDontKnow, region_info.IsStackMemory()); - EXPECT_EQ(MemoryRegionInfo::eDontKnow, region_info.IsShadowStack()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eDontKnow, + region_info.GetMemoryTagged()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eDontKnow, + region_info.IsStackMemory()); + 
EXPECT_EQ(lldb_private::MemoryRegionInfo::eDontKnow, + region_info.IsShadowStack()); result = std::async(std::launch::async, [&] { return client.GetMemoryRegionInfo(addr, region_info); @@ -358,9 +361,9 @@ TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfo) { HandlePacket(server, "qMemoryRegionInfo:a000", "start:a000;size:2000;flags:;type:stack;"); EXPECT_TRUE(result.get().Success()); - EXPECT_EQ(MemoryRegionInfo::eNo, region_info.GetMemoryTagged()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.IsStackMemory()); - EXPECT_EQ(MemoryRegionInfo::eNo, region_info.IsShadowStack()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eNo, region_info.GetMemoryTagged()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, region_info.IsStackMemory()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eNo, region_info.IsShadowStack()); result = std::async(std::launch::async, [&] { return client.GetMemoryRegionInfo(addr, region_info); @@ -369,9 +372,10 @@ TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfo) { HandlePacket(server, "qMemoryRegionInfo:a000", "start:a000;size:2000;flags: mt zz mt ss ;type:ha,ha,stack;"); EXPECT_TRUE(result.get().Success()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.GetMemoryTagged()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.IsStackMemory()); - EXPECT_EQ(MemoryRegionInfo::eYes, region_info.IsShadowStack()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, + region_info.GetMemoryTagged()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, region_info.IsStackMemory()); + EXPECT_EQ(lldb_private::MemoryRegionInfo::eYes, region_info.IsShadowStack()); result = std::async(std::launch::async, [&] { return client.GetMemoryRegionInfo(addr, region_info); @@ -380,12 +384,12 @@ TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfo) { HandlePacket(server, "qMemoryRegionInfo:a000", "start:a000;size:2000;type:heap;"); EXPECT_TRUE(result.get().Success()); - EXPECT_EQ(MemoryRegionInfo::eNo, region_info.IsStackMemory()); + 
EXPECT_EQ(lldb_private::MemoryRegionInfo::eNo, region_info.IsStackMemory()); } TEST_F(GDBRemoteCommunicationClientTest, GetMemoryRegionInfoInvalidResponse) { const lldb::addr_t addr = 0x4000; - MemoryRegionInfo region_info; + lldb_private::MemoryRegionInfo region_info; std::future result = std::async(std::launch::async, [&] { return client.GetMemoryRegionInfo(addr, region_info); }); diff --git a/lldb/unittests/Target/MemoryTest.cpp b/lldb/unittests/Target/MemoryTest.cpp index e444f68dc4871..131a3cabdd896 100644 --- a/lldb/unittests/Target/MemoryTest.cpp +++ b/lldb/unittests/Target/MemoryTest.cpp @@ -48,6 +48,8 @@ class DummyProcess : public Process { } Status DoDestroy() override { return {}; } void RefreshStateAfterStop() override {} + // Required by Target::ReadMemory() to call Process::ReadMemory() + bool IsAlive() override { return true; } size_t DoReadMemory(lldb::addr_t vm_addr, void *buf, size_t size, Status &error) override { if (m_bytes_left == 0) @@ -61,7 +63,7 @@ class DummyProcess : public Process { m_bytes_left -= size; } - memset(buf, 'B', num_bytes_to_write); + memset(buf, m_filler, num_bytes_to_write); return num_bytes_to_write; } bool DoUpdateThreadList(ThreadList &old_thread_list, @@ -72,8 +74,10 @@ class DummyProcess : public Process { // Test-specific additions size_t m_bytes_left; + int m_filler = 'B'; MemoryCache &GetMemoryCache() { return m_memory_cache; } void SetMaxReadSize(size_t size) { m_bytes_left = size; } + void SetFiller(int filler) { m_filler = filler; } }; } // namespace @@ -85,6 +89,18 @@ TargetSP CreateTarget(DebuggerSP &debugger_sp, ArchSpec &arch) { return target_sp; } +static ProcessSP CreateProcess(lldb::TargetSP target_sp) { + ListenerSP listener_sp(Listener::MakeListener("dummy")); + ProcessSP process_sp = std::make_shared(target_sp, listener_sp); + + struct TargetHack : public Target { + void SetProcess(ProcessSP process) { m_process_sp = process; } + }; + static_cast(target_sp.get())->SetProcess(process_sp); + + return 
process_sp; +} + TEST_F(MemoryTest, TesetMemoryCacheRead) { ArchSpec arch("x86_64-apple-macosx-"); @@ -96,8 +112,7 @@ TEST_F(MemoryTest, TesetMemoryCacheRead) { TargetSP target_sp = CreateTarget(debugger_sp, arch); ASSERT_TRUE(target_sp); - ListenerSP listener_sp(Listener::MakeListener("dummy")); - ProcessSP process_sp = std::make_shared(target_sp, listener_sp); + ProcessSP process_sp = CreateProcess(target_sp); ASSERT_TRUE(process_sp); DummyProcess *process = static_cast(process_sp.get()); @@ -227,6 +242,58 @@ TEST_F(MemoryTest, TesetMemoryCacheRead) { // old cache } +TEST_F(MemoryTest, TestReadInteger) { + ArchSpec arch("x86_64-apple-macosx-"); + + Platform::SetHostPlatform(PlatformRemoteMacOSX::CreateInstance(true, &arch)); + + DebuggerSP debugger_sp = Debugger::CreateInstance(); + ASSERT_TRUE(debugger_sp); + + TargetSP target_sp = CreateTarget(debugger_sp, arch); + ASSERT_TRUE(target_sp); + + ProcessSP process_sp = CreateProcess(target_sp); + ASSERT_TRUE(process_sp); + + DummyProcess *process = static_cast(process_sp.get()); + Status error; + + process->SetFiller(0xff); + process->SetMaxReadSize(256); + // The ReadSignedIntegerFromMemory() methods return int64_t. Check that they + // extend the sign correctly when reading 32-bit values. + EXPECT_EQ(-1, + target_sp->ReadSignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(-1, process->ReadSignedIntegerFromMemory(0, 4, 0, error)); + // Check reading 64-bit values as well. + EXPECT_EQ(-1, + target_sp->ReadSignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(-1, process->ReadSignedIntegerFromMemory(0, 8, 0, error)); + + // ReadUnsignedIntegerFromMemory() should not extend the sign. 
+ EXPECT_EQ(0xffffffffULL, + target_sp->ReadUnsignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(0xffffffffULL, + process->ReadUnsignedIntegerFromMemory(0, 4, 0, error)); + EXPECT_EQ(0xffffffffffffffffULL, + target_sp->ReadUnsignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(0xffffffffffffffffULL, + process->ReadUnsignedIntegerFromMemory(0, 8, 0, error)); + + // Check reading positive values. + process->GetMemoryCache().Clear(); + process->SetFiller(0x7f); + process->SetMaxReadSize(256); + EXPECT_EQ(0x7f7f7f7fLL, + target_sp->ReadSignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(0x7f7f7f7fLL, process->ReadSignedIntegerFromMemory(0, 4, 0, error)); + EXPECT_EQ(0x7f7f7f7f7f7f7f7fLL, + target_sp->ReadSignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(0x7f7f7f7f7f7f7f7fLL, + process->ReadSignedIntegerFromMemory(0, 8, 0, error)); +} + /// A process class that, when asked to read memory from some address X, returns /// the least significant byte of X. 
class DummyReaderProcess : public Process { diff --git a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp index e28366e9f0432..6c74860971674 100644 --- a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp +++ b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp @@ -1008,13 +1008,13 @@ TEST_F(TestArm64InstEmulation, TestMidFunctionEpilogueAndBackwardsJump) { // row[4]: 24: CFA=sp+48 => fp= lr= // // This must come from +56 - // row[5]: 32: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-24], x23=[CFA-32] + // row[5]: 32: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-32], x23=[CFA-24] // row[6]: 40: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=same, x23 = same // row[6]: 44: CFA=sp+48 => fp=same lr=same x22=same, x23 = same // row[6]: 48: CFA=sp0 => fp=same lr=same x22=same, x23 = same // // row[x]: 52: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] - // row[x]: 56: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-24], x23=[CFA-32] + // row[x]: 56: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-32], x23=[CFA-24] // clang-format on sample_range = AddressRange(0x1000, sizeof(data)); @@ -1059,7 +1059,7 @@ TEST_F(TestArm64InstEmulation, TestMidFunctionEpilogueAndBackwardsJump) { // <+28>: ret // <+32>: mov x23, #0x1 row = unwind_plan.GetRowForFunctionOffset(32); - // FIXME: EXPECT_NE(32, row->GetOffset()); + // FIXME: EXPECT_NE(28, row->GetOffset()); // Check that the state of this branch // <+16>: b.ne ; <+52> DO_SOMETHING_AND_GOTO_AFTER_EPILOGUE diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst index ad544342de329..b90f313f70cb2 100644 --- a/llvm/docs/GettingInvolved.rst +++ b/llvm/docs/GettingInvolved.rst @@ -209,7 +209,7 @@ what to add to your calendar invite. 
- `ics `__ - `Meeting details/agenda: `__ * - `LLVM Qualification Working Group `__ - - 1st Tuesday/Wednesday of the month + - 1st Tuesday of the month - `ics `__ `gcal `__ - `Minutes/docs `__ diff --git a/llvm/docs/InstCombineContributorGuide.md b/llvm/docs/InstCombineContributorGuide.md index 12567fc36f1d1..1c432b9b7446c 100644 --- a/llvm/docs/InstCombineContributorGuide.md +++ b/llvm/docs/InstCombineContributorGuide.md @@ -338,7 +338,7 @@ complexity and increasing compile-time overhead. We do not require explicit proof of real-world usefulness for every transform -- in most cases the usefulness is fairly "obvious". However, the question may -come up for complex or unusual folds. Keep this in mind when chosing what you +come up for complex or unusual folds. Keep this in mind when choosing what you work on. In particular, fixes for fuzzer-generated missed optimization reports will diff --git a/llvm/docs/KeyInstructionsDebugInfo.md b/llvm/docs/KeyInstructionsDebugInfo.md index 305740554c0fe..d93151a236680 100644 --- a/llvm/docs/KeyInstructionsDebugInfo.md +++ b/llvm/docs/KeyInstructionsDebugInfo.md @@ -82,7 +82,7 @@ int c = ``` In the current implementation an `is_stmt` won't be generated for the `a + b` instruction, meaning debuggers will likely step over the `add` and stop at the `store` of the result into `c` (which does get `is_stmt`). A user might have wished to edit `a` or `b` on the previous line in order to alter the result stored to `c`, which they now won't have the chance to do (they'd need to edit the variables on a previous line instead). If the expression was all on one line then they would be able to edit the values before the `add`. For these reasons we're choosing to recommend that the feature should not be enabled at O0. 
-It should be possible to fix this case if we make a few changes: add all the instructions in the statement (i.e., including the loads) to the atom, and tweak the DwarfEmission code to understand this situation (same atom, different line). So there is room to persue this in the future. Though that gets tricky in some cases due to the [other limitation mentioned above](#lack-of-multiple-atom-membership), e.g.: +It should be possible to fix this case if we make a few changes: add all the instructions in the statement (i.e., including the loads) to the atom, and tweak the DwarfEmission code to understand this situation (same atom, different line). So there is room to pursue this in the future. Though that gets tricky in some cases due to the [other limitation mentioned above](#lack-of-multiple-atom-membership), e.g.: ```c int e = // atom 1 (a + b) // atom 1 diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 33c23f2949765..02865f8a29c67 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -3234,6 +3234,24 @@ A "convergencectrl" operand bundle is only valid on a ``convergent`` operation. When present, the operand bundle must contain exactly one value of token type. See the :doc:`ConvergentOperations` document for details. +.. _deactivationsymbol: + +Deactivation Symbol Operand Bundles +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A ``"deactivation-symbol"`` operand bundle is valid on the following +instructions (AArch64 only): + +- Call to a normal function with ``notail`` attribute and a first argument and + return value of type ``ptr``. +- Call to ``llvm.ptrauth.sign`` or ``llvm.ptrauth.auth`` intrinsics. + +This operand bundle specifies that if the deactivation symbol is defined +to a valid value for the target, the marked instruction will return the +value of its first argument instead of calling the specified function +or intrinsic. This is achieved with ``PATCHINST`` relocations on the +target instructions (see the AArch64 psABI for details). 
+ .. _moduleasm: Module-Level Inline Assembly @@ -5284,7 +5302,7 @@ need to refer to the actual function body. Pointer Authentication Constants -------------------------------- -``ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?)`` +``ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC[, ptr DS]?]?]?)`` A '``ptrauth``' constant represents a pointer with a cryptographic authentication signature embedded into some bits, as described in the @@ -5313,6 +5331,11 @@ Otherwise, the expression is equivalent to: %tmp2 = call i64 @llvm.ptrauth.sign(i64 ptrtoint (ptr CST to i64), i32 KEY, i64 %tmp1) %val = inttoptr i64 %tmp2 to ptr +If the deactivation symbol operand ``DS`` has a non-null value, +the semantics are as if a :ref:`deactivation-symbol operand bundle +` were added to the ``llvm.ptrauth.sign`` intrinsic +calls above, with ``DS`` as the only operand. + .. _constantexprs: Constant Expressions diff --git a/llvm/docs/QualGroup.rst b/llvm/docs/QualGroup.rst index 01c1f6f9d0032..1c065f69ef613 100644 --- a/llvm/docs/QualGroup.rst +++ b/llvm/docs/QualGroup.rst @@ -241,15 +241,8 @@ Agendas, meeting notes, and presentation slides for the sync-ups are shared to e Upcoming and past meeting agendas, and meeting minutes are published in a dedicated thread on the LLVM Discourse forum: `Meeting Agendas and Minutes `_ -Slides used to support discussions during sync-up meetings are stored in LLVM's GitHub repository. - -Available slides: - -* (add future entries here) -* `October 2025 `_ -* `September 2025 `_ -* `August 2025 `_ -* `July 2025 `_ +Slides used to support discussions during sync-up meetings are stored in a dedicated Google Drive folder: `Link `_. +Note that the naming convention for these slides is *YYYYMM*\_llvm_qual_wg. 
AI Transcription Policy ======================= diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md index 6203c01453d55..c6c527d1ae964 100644 --- a/llvm/docs/ReleaseNotes.md +++ b/llvm/docs/ReleaseNotes.md @@ -222,6 +222,7 @@ Changes to BOLT Changes to Sanitizers --------------------- +* Support running TypeSanitizer with UndefinedBehaviourSanitizer. * TypeSanitizer no longer inlines all instrumentation by default. Added the `-f[no-]sanitize-type-outline-instrumentation` flags to give users control over this behaviour. diff --git a/llvm/docs/Telemetry.rst b/llvm/docs/Telemetry.rst index 4f30ae82b5628..c36105c99709f 100644 --- a/llvm/docs/Telemetry.rst +++ b/llvm/docs/Telemetry.rst @@ -32,7 +32,7 @@ Important notes * There is no concrete implementation of a Telemetry library in upstream LLVM. We only provide the abstract API here. Any tool that wants telemetry will implement one. - + The rationale for this is that all the tools in LLVM are very different in what they care about (what/where/when to instrument data). Hence, it might not be practical to have a single implementation. @@ -41,16 +41,16 @@ Important notes * No implementation of Telemetry in upstream LLVM shall store any of the collected data due to privacy and security reasons: - + * Different organizations have different privacy models: - + * Which data is sensitive, which is not? * Whether it is acceptable for instrumented data to be stored anywhere? (to a local file, what not?) - + * Data ownership and data collection consents are hard to accommodate from LLVM developers' point of view: - + * E.g., data collected by Telemetry is not necessarily owned by the user of an LLVM tool with Telemetry enabled, hence the user's consent to data collection is not meaningful. On the other hand, LLVM developers have no @@ -75,7 +75,7 @@ The framework consists of four important classes: It is up to the vendor to decide which pieces of data to forward and where to forward them to for their final storage. 
* ``llvm::telemetry::Config``: Configurations for the ``Manager``. - + .. image:: llvm_telemetry_design.png How to implement and interact with the API @@ -123,7 +123,7 @@ To use Telemetry in your tool, you need to provide a concrete implementation of void write(StringRef KeyName, unsigned long Value) override { writeHelper(KeyName, Value); } - + void write(StringRef KeyName, unsigned long long Value) override { writeHelper(KeyName, Value); } @@ -131,12 +131,12 @@ To use Telemetry in your tool, you need to provide a concrete implementation of void write(StringRef KeyName, StringRef Value) override { writeHelper(KeyName, Value); } - + void beginObject(StringRef KeyName) override { Children.push_back(json::Object()); ChildrenNames.push_back(KeyName.str()); } - + void endObject() override { assert(!Children.empty() && !ChildrenNames.empty()); json::Value Val = json::Value(std::move(Children.back())); @@ -146,7 +146,7 @@ To use Telemetry in your tool, you need to provide a concrete implementation of ChildrenNames.pop_back(); writeHelper(Name, std::move(Val)); } - + Error finalize() override { if (!Started) return createStringError("Serializer not currently in use"); @@ -167,10 +167,10 @@ To use Telemetry in your tool, you need to provide a concrete implementation of std::vector Children; std::vector ChildrenNames; }; - - class MyManager : public telemery::Manager { + + class MyManager : public telemetry::Manager { public: - static std::unique_ptr createInstatnce(telemetry::Config *Config) { + static std::unique_ptr createInstance(telemetry::Config *Config) { // If Telemetry is not enabled, then just return null; if (!Config->EnableTelemetry) return nullptr; @@ -182,19 +182,19 @@ To use Telemetry in your tool, you need to provide a concrete implementation of Entry->SessionId = SessionId; return Error::success(); } - + // You can also define additional instrumentation points. void logStartup(TelemetryInfo *Entry) { // Add some additional data to entry. 
Entry->Msg = "Some message"; dispatch(Entry); } - + void logAdditionalPoint(TelemetryInfo *Entry) { // .... code here } - - private: + + private: const std::string SessionId; }; @@ -203,11 +203,11 @@ To use Telemetry in your tool, you need to provide a concrete implementation of Error receiveEntry(const TelemetryInfo *Entry) override { if (Error Err = Serializer.init()) return Err; - + Entry->serialize(Serializer); if (Error Err = Serializer.finalize()) return Err; - + json::Object Copied = *Serializer.getOutputObject(); // Send the `Copied` object to wherever. return Error::success(); @@ -220,16 +220,16 @@ To use Telemetry in your tool, you need to provide a concrete implementation of // This defines a custom TelemetryInfo that has an additional Msg field. struct MyTelemetryInfo : public telemetry::TelemetryInfo { std::string Msg; - + Error serialize(Serializer &Serializer) const override { TelemetryInfo::serialize(serializer); Serializer.writeString("MyMsg", Msg); } - + // Note: implement getKind() and classof() to support dyn_cast operations. }; - + 2) Use the library in your tool. Logging the tool init-process: @@ -241,10 +241,10 @@ Logging the tool init-process: telemetry::Config MyConfig = makeConfig(); // Build up the appropriate Config struct here. auto Manager = MyManager::createInstance(&MyConfig); - + // Any other tool's init code can go here. // ... - + // Finally, take a snapshot of the time now so we know how long it took the // init process to finish. 
auto EndTime = std::chrono::time_point::now(); diff --git a/llvm/include/llvm/Analysis/Delinearization.h b/llvm/include/llvm/Analysis/Delinearization.h index 434cfb61699d6..b9fc0bcf47430 100644 --- a/llvm/include/llvm/Analysis/Delinearization.h +++ b/llvm/include/llvm/Analysis/Delinearization.h @@ -17,6 +17,7 @@ #define LLVM_ANALYSIS_DELINEARIZATION_H #include "llvm/IR/PassManager.h" +#include "llvm/IR/Value.h" namespace llvm { class raw_ostream; @@ -133,14 +134,22 @@ bool findFixedSizeArrayDimensions(ScalarEvolution &SE, const SCEV *Expr, /// terms exist in the \p Expr. In other words, it assumes that the all step /// values are constant. /// -/// This function is intended to replace getIndexExpressionsFromGEP and -/// tryDelinearizeFixedSizeImpl. They rely on the GEP source element type so -/// that they will be removed in the future. +/// This function is intended to replace getIndexExpressionsFromGEP, which +/// relies on the GEP source element type and so will be removed in the future. bool delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr, SmallVectorImpl &Subscripts, SmallVectorImpl &Sizes, const SCEV *ElementSize); +/// Check that each subscript in \p Subscripts is within the corresponding size +/// in \p Sizes. For the outermost dimension, the subscript being negative is +/// allowed. If \p Ptr is not nullptr, it may be used to get information from +/// the IR pointer value, which may help in the validation. +bool validateDelinearizationResult(ScalarEvolution &SE, + ArrayRef Sizes, + ArrayRef Subscripts, + const Value *Ptr = nullptr); + /// Gathers the individual index expressions from a GEP instruction. /// /// This function optimistically assumes the GEP references into a fixed size @@ -155,17 +164,6 @@ bool getIndexExpressionsFromGEP(ScalarEvolution &SE, SmallVectorImpl &Subscripts, SmallVectorImpl &Sizes); -/// Implementation of fixed size array delinearization. 
Try to delinearize -/// access function for a fixed size multi-dimensional array, by deriving -/// subscripts from GEP instructions. Returns true upon success and false -/// otherwise. \p Inst is the load/store instruction whose pointer operand is -/// the one we want to delinearize. \p AccessFn is its corresponding SCEV -/// expression w.r.t. the surrounding loop. -bool tryDelinearizeFixedSizeImpl(ScalarEvolution *SE, Instruction *Inst, - const SCEV *AccessFn, - SmallVectorImpl &Subscripts, - SmallVectorImpl &Sizes); - struct DelinearizationPrinterPass : public PassInfoMixin { explicit DelinearizationPrinterPass(raw_ostream &OS); diff --git a/llvm/include/llvm/Analysis/DependenceAnalysis.h b/llvm/include/llvm/Analysis/DependenceAnalysis.h index f603ae8dbd70f..8286d8e8e45cc 100644 --- a/llvm/include/llvm/Analysis/DependenceAnalysis.h +++ b/llvm/include/llvm/Analysis/DependenceAnalysis.h @@ -355,16 +355,11 @@ class DependenceInfo { Function *getFunction() const { return F; } - /// getRuntimeAssumptions - Returns all the runtime assumptions under which - /// the dependence test is valid. - LLVM_ABI SCEVUnionPredicate getRuntimeAssumptions() const; - private: AAResults *AA; ScalarEvolution *SE; LoopInfo *LI; Function *F; - SmallVector Assumptions; /// Subscript - This private struct represents a pair of subscripts from /// a pair of potentially multi-dimensional array references. We use a @@ -773,8 +768,8 @@ class DependenceInfo { SmallVectorImpl &Pair); /// Tries to delinearize \p Src and \p Dst access functions for a fixed size - /// multi-dimensional array. Calls tryDelinearizeFixedSizeImpl() to - /// delinearize \p Src and \p Dst separately, + /// multi-dimensional array. 
Calls delinearizeFixedSizeArray() to delinearize + /// \p Src and \p Dst separately, bool tryDelinearizeFixedSize(Instruction *Src, Instruction *Dst, const SCEV *SrcAccessFn, const SCEV *DstAccessFn, SmallVectorImpl &SrcSubscripts, diff --git a/llvm/include/llvm/Analysis/IVDescriptors.h b/llvm/include/llvm/Analysis/IVDescriptors.h index 2c8484fde5b16..fc141ed6d96fe 100644 --- a/llvm/include/llvm/Analysis/IVDescriptors.h +++ b/llvm/include/llvm/Analysis/IVDescriptors.h @@ -95,12 +95,17 @@ class RecurrenceDescriptor { RecurKind K, FastMathFlags FMF, Instruction *ExactFP, Type *RT, bool Signed, bool Ordered, SmallPtrSetImpl &CI, - unsigned MinWidthCastToRecurTy) + unsigned MinWidthCastToRecurTy, + bool PhiHasUsesOutsideReductionChain = false) : IntermediateStore(Store), StartValue(Start), LoopExitInstr(Exit), Kind(K), FMF(FMF), ExactFPMathInst(ExactFP), RecurrenceType(RT), IsSigned(Signed), IsOrdered(Ordered), + PhiHasUsesOutsideReductionChain(PhiHasUsesOutsideReductionChain), MinWidthCastToRecurrenceType(MinWidthCastToRecurTy) { CastInsts.insert_range(CI); + assert( + (!PhiHasUsesOutsideReductionChain || isMinMaxRecurrenceKind(K)) && + "Only min/max recurrences are allowed to have multiple uses currently"); } /// This POD struct holds information about a potential recurrence operation. @@ -339,6 +344,13 @@ class RecurrenceDescriptor { /// Expose an ordered FP reduction to the instance users. bool isOrdered() const { return IsOrdered; } + /// Returns true if the reduction PHI has any uses outside the reduction + /// chain. This is relevant for min/max reductions that are part of a + /// FindLastIV pattern. + bool hasUsesOutsideReductionChain() const { + return PhiHasUsesOutsideReductionChain; + } + /// Attempts to find a chain of operations from Phi to LoopExitInst that can /// be treated as a set of reductions instructions for in-loop reductions. 
LLVM_ABI SmallVector getReductionOpChain(PHINode *Phi, @@ -376,6 +388,10 @@ class RecurrenceDescriptor { // Currently only a non-reassociative FAdd can be considered in-order, // if it is also the only FAdd in the PHI's use chain. bool IsOrdered = false; + // True if the reduction PHI has in-loop users outside the reduction chain. + // This is relevant for min/max reductions that are part of a FindLastIV + // pattern. + bool PhiHasUsesOutsideReductionChain = false; // Instructions used for type-promoting the recurrence. SmallPtrSet CastInsts; // The minimum width used by the recurrence. diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index 22cff2035eb0b..e24e22da5681b 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -125,12 +125,23 @@ struct HardwareLoopInfo { /// Information for memory intrinsic cost model. class MemIntrinsicCostAttributes { + /// Optional context instruction, if one exists, e.g. the + /// load/store to transform to the intrinsic. + const Instruction *I = nullptr; + + /// Address in memory. + const Value *Ptr = nullptr; + /// Vector type of the data to be loaded or stored. Type *DataTy = nullptr; /// ID of the memory intrinsic. Intrinsic::ID IID; + /// True when the memory access is predicated with a mask + /// that is not a compile-time constant. + bool VariableMask = true; + /// Address space of the pointer. 
unsigned AddressSpace = 0; @@ -138,13 +149,29 @@ class MemIntrinsicCostAttributes { Align Alignment; public: + LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, + const Value *Ptr, bool VariableMask, + Align Alignment, + const Instruction *I = nullptr) + : I(I), Ptr(Ptr), DataTy(DataTy), IID(Id), VariableMask(VariableMask), + Alignment(Alignment) {} + LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, Align Alignment, unsigned AddressSpace) : DataTy(DataTy), IID(Id), AddressSpace(AddressSpace), Alignment(Alignment) {} + LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, + bool VariableMask, Align Alignment, + const Instruction *I = nullptr) + : I(I), DataTy(DataTy), IID(Id), VariableMask(VariableMask), + Alignment(Alignment) {} + Intrinsic::ID getID() const { return IID; } + const Instruction *getInst() const { return I; } + const Value *getPointer() const { return Ptr; } Type *getDataType() const { return DataTy; } + bool getVariableMask() const { return VariableMask; } unsigned getAddressSpace() const { return AddressSpace; } Align getAlignment() const { return Alignment; } }; @@ -1592,52 +1619,6 @@ class TargetTransformInfo { OperandValueInfo OpdInfo = {OK_AnyValue, OP_None}, const Instruction *I = nullptr) const; - /// \return The cost of masked Load and Store instructions. 
- LLVM_ABI InstructionCost getMaskedMemoryOpCost( - const MemIntrinsicCostAttributes &MICA, - TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const; - - /// \return The cost of Gather or Scatter operation - /// \p Opcode - is a type of memory access Load or Store - /// \p DataTy - a vector type of the data to be loaded or stored - /// \p Ptr - pointer [or vector of pointers] - address[es] in memory - /// \p VariableMask - true when the memory access is predicated with a mask - /// that is not a compile-time constant - /// \p Alignment - alignment of single element - /// \p I - the optional original context instruction, if one exists, e.g. the - /// load/store to transform or the call to the gather/scatter intrinsic - LLVM_ABI InstructionCost getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, - const Instruction *I = nullptr) const; - - /// \return The cost of Expand Load or Compress Store operation - /// \p Opcode - is a type of memory access Load or Store - /// \p Src - a vector type of the data to be loaded or stored - /// \p VariableMask - true when the memory access is predicated with a mask - /// that is not a compile-time constant - /// \p Alignment - alignment of single element - /// \p I - the optional original context instruction, if one exists, e.g. the - /// load/store to transform or the call to the gather/scatter intrinsic - LLVM_ABI InstructionCost getExpandCompressMemoryOpCost( - unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, - const Instruction *I = nullptr) const; - - /// \return The cost of strided memory operations. 
- /// \p Opcode - is a type of memory access Load or Store - /// \p DataTy - a vector type of the data to be loaded or stored - /// \p Ptr - pointer [or vector of pointers] - address[es] in memory - /// \p VariableMask - true when the memory access is predicated with a mask - /// that is not a compile-time constant - /// \p Alignment - alignment of single element - /// \p I - the optional original context instruction, if one exists, e.g. the - /// load/store to transform or the call to the gather/scatter intrinsic - LLVM_ABI InstructionCost getStridedMemoryOpCost( - unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, - const Instruction *I = nullptr) const; - /// \return The cost of the interleaved memory operation. /// \p Opcode is the memory operation code /// \p VecTy is the vector type of the interleaved access. @@ -1716,6 +1697,12 @@ class TargetTransformInfo { LLVM_ABI InstructionCost getIntrinsicInstrCost( const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const; + /// \returns The cost of memory intrinsic instructions. + /// Used when IntrinsicInst is not materialized. + LLVM_ABI InstructionCost + getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const; + /// \returns The cost of Call instructions. 
LLVM_ABI InstructionCost getCallInstrCost( Function *F, Type *RetTy, ArrayRef Tys, diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h index 4954c0d90a1e1..624302bc6d0a3 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -859,9 +859,9 @@ class TargetTransformInfoImplBase { return 1; } - virtual InstructionCost getExpandCompressMemoryOpCost( - unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind, const Instruction *I = nullptr) const { + virtual InstructionCost + getExpandCompressMemoryOpCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const { return 1; } @@ -929,6 +929,11 @@ class TargetTransformInfoImplBase { return 1; } + virtual InstructionCost + getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const { + return 1; + } virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef Tys, TTI::TargetCostKind CostKind) const { diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h index 991aa49d787f9..2451d588bdbf7 100644 --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -437,6 +437,8 @@ enum ConstantsCodes { CST_CODE_CE_GEP_WITH_INRANGE = 31, // [opty, flags, range, n x operands] CST_CODE_CE_GEP = 32, // [opty, flags, n x operands] CST_CODE_PTRAUTH = 33, // [ptr, key, disc, addrdisc] + CST_CODE_PTRAUTH2 = 34, // [ptr, key, disc, addrdisc, + // deactivation_symbol] }; /// CastOpcodes - These are values used in the bitcode files to encode which diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index cb389ae74ef46..b1beb68feca46 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -1580,10 +1580,14 @@ class 
BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { } InstructionCost - getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy, - bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) const override { + getExpandCompressMemoryOpCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const override { + unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload + ? Instruction::Load + : Instruction::Store; + Type *DataTy = MICA.getDataType(); + bool VariableMask = MICA.getVariableMask(); + Align Alignment = MICA.getAlignment(); // Treat expand load/compress store as gather/scatter operation. // TODO: implement more precise cost estimation for these intrinsics. return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask, @@ -1624,8 +1628,9 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { if (UseMaskForCond || UseMaskForGaps) { unsigned IID = Opcode == Instruction::Load ? 
Intrinsic::masked_load : Intrinsic::masked_store; - Cost = thisT()->getMaskedMemoryOpCost( - {IID, VecTy, Alignment, AddressSpace}, CostKind); + Cost = thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, VecTy, Alignment, AddressSpace), + CostKind); } else Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind); @@ -1825,9 +1830,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { if (auto *VPI = dyn_cast_or_null(ICA.getInst())) Alignment = VPI->getPointerAlignment().valueOrOne(); bool VarMask = isa(ICA.getArgs()[2]); - return thisT()->getGatherScatterOpCost( - Instruction::Store, ICA.getArgTypes()[0], ICA.getArgs()[1], VarMask, - Alignment, CostKind, nullptr); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::vp_scatter, + ICA.getArgTypes()[0], ICA.getArgs()[1], + VarMask, Alignment, nullptr), + CostKind); } if (ICA.getID() == Intrinsic::vp_gather) { if (ICA.isTypeBasedOnly()) { @@ -1841,9 +1848,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { if (auto *VPI = dyn_cast_or_null(ICA.getInst())) Alignment = VPI->getPointerAlignment().valueOrOne(); bool VarMask = isa(ICA.getArgs()[1]); - return thisT()->getGatherScatterOpCost( - Instruction::Load, ICA.getReturnType(), ICA.getArgs()[0], VarMask, - Alignment, CostKind, nullptr); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::vp_gather, + ICA.getReturnType(), ICA.getArgs()[0], + VarMask, Alignment, nullptr), + CostKind); } if (ICA.getID() == Intrinsic::vp_select || @@ -1948,31 +1957,37 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { const Value *Mask = Args[2]; bool VarMask = !isa(Mask); Align Alignment = I->getParamAlign(1).valueOrOne(); - return thisT()->getGatherScatterOpCost(Instruction::Store, - ICA.getArgTypes()[0], Args[1], - VarMask, Alignment, CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + 
MemIntrinsicCostAttributes(Intrinsic::masked_scatter, + ICA.getArgTypes()[0], Args[1], VarMask, + Alignment, I), + CostKind); } case Intrinsic::masked_gather: { const Value *Mask = Args[1]; bool VarMask = !isa(Mask); Align Alignment = I->getParamAlign(0).valueOrOne(); - return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0], - VarMask, Alignment, CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_gather, RetTy, Args[0], + VarMask, Alignment, I), + CostKind); } case Intrinsic::masked_compressstore: { const Value *Data = Args[0]; const Value *Mask = Args[2]; Align Alignment = I->getParamAlign(1).valueOrOne(); - return thisT()->getExpandCompressMemoryOpCost( - Instruction::Store, Data->getType(), !isa(Mask), Alignment, - CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Data->getType(), !isa(Mask), + Alignment, I), + CostKind); } case Intrinsic::masked_expandload: { const Value *Mask = Args[1]; Align Alignment = I->getParamAlign(0).valueOrOne(); - return thisT()->getExpandCompressMemoryOpCost(Instruction::Load, RetTy, - !isa(Mask), - Alignment, CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, RetTy, !isa(Mask), + Alignment, I), + CostKind); } case Intrinsic::experimental_vp_strided_store: { const Value *Data = Args[0]; @@ -1983,9 +1998,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { Type *EltTy = cast(Data->getType())->getElementType(); Align Alignment = I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy)); - return thisT()->getStridedMemoryOpCost(Instruction::Store, - Data->getType(), Ptr, VarMask, - Alignment, CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Data->getType(), Ptr, VarMask, + Alignment, I), + CostKind); } case Intrinsic::experimental_vp_strided_load: { const Value *Ptr = Args[0]; @@ -1995,8 +2011,9 @@ class 
BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { Type *EltTy = cast(RetTy)->getElementType(); Align Alignment = I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy)); - return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr, - VarMask, Alignment, CostKind, I); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, RetTy, Ptr, VarMask, Alignment, I), + CostKind); } case Intrinsic::stepvector: { if (isa(RetTy)) @@ -2409,26 +2426,32 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { case Intrinsic::masked_store: { Type *Ty = Tys[0]; Align TyAlign = thisT()->DL.getABITypeAlign(Ty); - return thisT()->getMaskedMemoryOpCost({IID, Ty, TyAlign, 0}, CostKind); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind); } case Intrinsic::masked_load: { Type *Ty = RetTy; Align TyAlign = thisT()->DL.getABITypeAlign(Ty); - return thisT()->getMaskedMemoryOpCost({IID, Ty, TyAlign, 0}, CostKind); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind); } case Intrinsic::experimental_vp_strided_store: { auto *Ty = cast(ICA.getArgTypes()[0]); Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType()); - return thisT()->getStridedMemoryOpCost( - Instruction::Store, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true, - Alignment, CostKind, ICA.getInst()); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr, + /*VariableMask=*/true, Alignment, + ICA.getInst()), + CostKind); } case Intrinsic::experimental_vp_strided_load: { auto *Ty = cast(ICA.getReturnType()); Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType()); - return thisT()->getStridedMemoryOpCost( - Instruction::Load, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true, - Alignment, CostKind, ICA.getInst()); + return thisT()->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr, + 
/*VariableMask=*/true, Alignment, + ICA.getInst()), + CostKind); } case Intrinsic::vector_reduce_add: case Intrinsic::vector_reduce_mul: @@ -3016,6 +3039,48 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { return SingleCallCost; } + /// Get memory intrinsic cost based on arguments. + InstructionCost + getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const override { + unsigned Id = MICA.getID(); + Type *DataTy = MICA.getDataType(); + const Value *Ptr = MICA.getPointer(); + const Instruction *I = MICA.getInst(); + bool VariableMask = MICA.getVariableMask(); + Align Alignment = MICA.getAlignment(); + + switch (Id) { + case Intrinsic::experimental_vp_strided_load: + case Intrinsic::experimental_vp_strided_store: { + unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load + ? Instruction::Load + : Instruction::Store; + return thisT()->getStridedMemoryOpCost(Opcode, DataTy, Ptr, VariableMask, + Alignment, CostKind, I); + } + case Intrinsic::masked_scatter: + case Intrinsic::masked_gather: + case Intrinsic::vp_scatter: + case Intrinsic::vp_gather: { + unsigned Opcode = + (Id == Intrinsic::masked_gather || Id == Intrinsic::vp_gather) + ? Instruction::Load + : Instruction::Store; + return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, + Alignment, CostKind, I); + } + case Intrinsic::masked_load: + case Intrinsic::masked_store: + return thisT()->getMaskedMemoryOpCost(MICA, CostKind); + case Intrinsic::masked_compressstore: + case Intrinsic::masked_expandload: + return thisT()->getExpandCompressMemoryOpCost(MICA, CostKind); + default: + llvm_unreachable("unexpected intrinsic"); + } + } + /// Compute a cost of the given call instruction. 
/// /// Compute the cost of calling function F with return type RetTy and diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h index a8bde824527a5..fea900f37ec74 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -159,6 +159,8 @@ class LLVM_ABI CallLowering { /// True if this call results in convergent operations. bool IsConvergent = true; + + GlobalValue *DeactivationSymbol = nullptr; }; /// Argument handling is mostly uniform between the four places that diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index 40c7792f7e8a2..5f3f1d386569c 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -56,6 +56,7 @@ struct MachineIRBuilderState { MDNode *PCSections = nullptr; /// MMRA Metadata to be set on any instruction we create. MDNode *MMRA = nullptr; + Value *DS = nullptr; /// \name Fields describing the insertion point. /// @{ @@ -369,6 +370,7 @@ class LLVM_ABI MachineIRBuilder { State.II = MI.getIterator(); setPCSections(MI.getPCSections()); setMMRAMetadata(MI.getMMRAMetadata()); + setDeactivationSymbol(MI.getDeactivationSymbol()); } /// @} @@ -405,6 +407,9 @@ class LLVM_ABI MachineIRBuilder { /// Set the PC sections metadata to \p MD for all the next build instructions. void setMMRAMetadata(MDNode *MMRA) { State.MMRA = MMRA; } + Value *getDeactivationSymbol() { return State.DS; } + void setDeactivationSymbol(Value *DS) { State.DS = DS; } + /// Get the current instruction's MMRA metadata. 
MDNode *getMMRAMetadata() { return State.MMRA; } diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h index cdaa916548c25..b32f3dacbb3a4 100644 --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -1579,6 +1579,10 @@ enum NodeType { // Outputs: Output Chain CLEAR_CACHE, + // Untyped node storing deactivation symbol reference + // (DeactivationSymbolSDNode). + DEACTIVATION_SYMBOL, + /// BUILTIN_OP_END - This must be the last enum value in this list. /// The target-specific pre-isel opcode values start here. BUILTIN_OP_END diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h index ef783f276b7d4..08ffdb2cb469d 100644 --- a/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/llvm/include/llvm/CodeGen/MachineFunction.h @@ -1207,7 +1207,7 @@ class LLVM_ABI MachineFunction { ArrayRef MMOs, MCSymbol *PreInstrSymbol = nullptr, MCSymbol *PostInstrSymbol = nullptr, MDNode *HeapAllocMarker = nullptr, MDNode *PCSections = nullptr, uint32_t CFIType = 0, - MDNode *MMRAs = nullptr); + MDNode *MMRAs = nullptr, Value *DS = nullptr); /// Allocate a string and populate it with the given external symbol name. const char *createExternalSymbolName(StringRef Name); diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h index ca984459c365a..077e39b49df6f 100644 --- a/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/llvm/include/llvm/CodeGen/MachineInstr.h @@ -160,8 +160,9 @@ class MachineInstr /// /// This has to be defined eagerly due to the implementation constraints of /// `PointerSumType` where it is used. 
- class ExtraInfo final : TrailingObjects { + class ExtraInfo final + : TrailingObjects { public: static ExtraInfo *create(BumpPtrAllocator &Allocator, ArrayRef MMOs, @@ -169,20 +170,23 @@ class MachineInstr MCSymbol *PostInstrSymbol = nullptr, MDNode *HeapAllocMarker = nullptr, MDNode *PCSections = nullptr, uint32_t CFIType = 0, - MDNode *MMRAs = nullptr) { + MDNode *MMRAs = nullptr, Value *DS = nullptr) { bool HasPreInstrSymbol = PreInstrSymbol != nullptr; bool HasPostInstrSymbol = PostInstrSymbol != nullptr; bool HasHeapAllocMarker = HeapAllocMarker != nullptr; bool HasMMRAs = MMRAs != nullptr; bool HasCFIType = CFIType != 0; bool HasPCSections = PCSections != nullptr; + bool HasDS = DS != nullptr; auto *Result = new (Allocator.Allocate( - totalSizeToAlloc( + totalSizeToAlloc( MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol, - HasHeapAllocMarker + HasPCSections + HasMMRAs, HasCFIType), + HasHeapAllocMarker + HasPCSections + HasMMRAs, HasCFIType, HasDS), alignof(ExtraInfo))) ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol, - HasHeapAllocMarker, HasPCSections, HasCFIType, HasMMRAs); + HasHeapAllocMarker, HasPCSections, HasCFIType, HasMMRAs, + HasDS); // Copy the actual data into the trailing objects. llvm::copy(MMOs, Result->getTrailingObjects()); @@ -202,6 +206,8 @@ class MachineInstr Result->getTrailingObjects()[0] = CFIType; if (HasMMRAs) Result->getTrailingObjects()[MDNodeIdx++] = MMRAs; + if (HasDS) + Result->getTrailingObjects()[0] = DS; return Result; } @@ -240,6 +246,10 @@ class MachineInstr : nullptr; } + Value *getDeactivationSymbol() const { + return HasDS ? getTrailingObjects()[0] : 0; + } + private: friend TrailingObjects; @@ -255,6 +265,7 @@ class MachineInstr const bool HasPCSections; const bool HasCFIType; const bool HasMMRAs; + const bool HasDS; // Implement the `TrailingObjects` internal API. 
size_t numTrailingObjects(OverloadToken) const { @@ -269,16 +280,17 @@ class MachineInstr size_t numTrailingObjects(OverloadToken) const { return HasCFIType; } + size_t numTrailingObjects(OverloadToken) const { return HasDS; } // Just a boring constructor to allow us to initialize the sizes. Always use // the `create` routine above. ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol, bool HasHeapAllocMarker, bool HasPCSections, bool HasCFIType, - bool HasMMRAs) + bool HasMMRAs, bool HasDS) : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol), HasPostInstrSymbol(HasPostInstrSymbol), HasHeapAllocMarker(HasHeapAllocMarker), HasPCSections(HasPCSections), - HasCFIType(HasCFIType), HasMMRAs(HasMMRAs) {} + HasCFIType(HasCFIType), HasMMRAs(HasMMRAs), HasDS(HasDS) {} }; /// Enumeration of the kinds of inline extra info available. It is important @@ -867,6 +879,14 @@ class MachineInstr return nullptr; } + Value *getDeactivationSymbol() const { + if (!Info) + return nullptr; + if (ExtraInfo *EI = Info.get()) + return EI->getDeactivationSymbol(); + return nullptr; + } + /// Helper to extract a CFI type hash if one has been added. uint32_t getCFIType() const { if (!Info) @@ -1969,6 +1989,8 @@ class MachineInstr /// Set the CFI type for the instruction. LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type); + LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS); + /// Return the MIFlags which represent both MachineInstrs. This /// should be used when merging two MachineInstrs into one. This routine does /// not modify the MIFlags of this MachineInstr. 
@@ -2088,7 +2110,7 @@ class MachineInstr void setExtraInfo(MachineFunction &MF, ArrayRef MMOs, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs); + uint32_t CFIType, MDNode *MMRAs, Value *DS); }; /// Special DenseMapInfo traits to compare MachineInstr* by *value* of the diff --git a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h index e705d7d99544c..caeb430d6fd1c 100644 --- a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h +++ b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h @@ -70,29 +70,44 @@ enum { } // end namespace RegState /// Set of metadata that should be preserved when using BuildMI(). This provides -/// a more convenient way of preserving DebugLoc, PCSections and MMRA. +/// a more convenient way of preserving certain data from the original +/// instruction. class MIMetadata { public: MIMetadata() = default; - MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr) - : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {} + MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr, + Value *DeactivationSymbol = nullptr) + : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA), + DeactivationSymbol(DeactivationSymbol) {} MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr) : DL(DI), PCSections(PCSections), MMRA(MMRA) {} explicit MIMetadata(const Instruction &From) : DL(From.getDebugLoc()), - PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {} + PCSections(From.getMetadata(LLVMContext::MD_pcsections)), + DeactivationSymbol(getDeactivationSymbol(&From)) {} explicit MIMetadata(const MachineInstr &From) - : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {} + : DL(From.getDebugLoc()), PCSections(From.getPCSections()), + DeactivationSymbol(From.getDeactivationSymbol()) {} const DebugLoc &getDL() const { return DL; } MDNode 
*getPCSections() const { return PCSections; } MDNode *getMMRAMetadata() const { return MMRA; } + Value *getDeactivationSymbol() const { return DeactivationSymbol; } private: DebugLoc DL; MDNode *PCSections = nullptr; MDNode *MMRA = nullptr; + Value *DeactivationSymbol = nullptr; + + static inline Value *getDeactivationSymbol(const Instruction *I) { + if (auto *CB = dyn_cast(I)) + if (auto Bundle = + CB->getOperandBundle(llvm::LLVMContext::OB_deactivation_symbol)) + return Bundle->Inputs[0].get(); + return nullptr; + } }; class MachineInstrBuilder { @@ -348,6 +363,8 @@ class MachineInstrBuilder { MI->setPCSections(*MF, MIMD.getPCSections()); if (MIMD.getMMRAMetadata()) MI->setMMRAMetadata(*MF, MIMD.getMMRAMetadata()); + if (MIMD.getDeactivationSymbol()) + MI->setDeactivationSymbol(*MF, MIMD.getDeactivationSymbol()); return *this; } diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h index b024e8a68bd6e..501cbc947132e 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAG.h +++ b/llvm/include/llvm/CodeGen/SelectionDAG.h @@ -759,6 +759,7 @@ class SelectionDAG { int64_t offset = 0, unsigned TargetFlags = 0) { return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags); } + LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV); LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false); SDValue getTargetFrameIndex(int FI, EVT VT) { return getFrameIndex(FI, VT, true); diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h index c5cdf76f4777e..7add717227963 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h @@ -151,6 +151,7 @@ class SelectionDAGISel { OPC_RecordChild7, OPC_RecordMemRef, OPC_CaptureGlueInput, + OPC_CaptureDeactivationSymbol, OPC_MoveChild, OPC_MoveChild0, OPC_MoveChild1, diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 
cfc8a4243e894..aa72e81b2ab54 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -2005,6 +2005,22 @@ class GlobalAddressSDNode : public SDNode { } }; +class DeactivationSymbolSDNode : public SDNode { + friend class SelectionDAG; + + const GlobalValue *TheGlobal; + + DeactivationSymbolSDNode(const GlobalValue *GV, SDVTList VTs) + : SDNode(ISD::DEACTIVATION_SYMBOL, 0, DebugLoc(), VTs), TheGlobal(GV) {} + +public: + const GlobalValue *getGlobal() const { return TheGlobal; } + + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::DEACTIVATION_SYMBOL; + } +}; + class FrameIndexSDNode : public SDNode { friend class SelectionDAG; diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 7df5d8a09f0f6..b2697c81fd825 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -4765,6 +4765,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase { SmallVector InVals; const ConstantInt *CFIType = nullptr; SDValue ConvergenceControlToken; + GlobalValue *DeactivationSymbol = nullptr; std::optional PAI; @@ -4918,6 +4919,11 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase { return *this; } + CallLoweringInfo &setDeactivationSymbol(GlobalValue *Sym) { + DeactivationSymbol = Sym; + return *this; + } + ArgListTy &getArgs() { return Args; } diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td index dfcc97b5880f5..74ea86774a8ee 100644 --- a/llvm/include/llvm/CodeGen/ValueTypes.td +++ b/llvm/include/llvm/CodeGen/ValueTypes.td @@ -6,16 +6,15 @@ // //===----------------------------------------------------------------------===// // -// ValueTypes.td - list of ValueType instances supported by the the +// ValueTypes.td - list of ValueType instances supported by the // CodeGen infrastructure. 
// //===----------------------------------------------------------------------===// -class ValueType { +class ValueType { string Namespace = "MVT"; - string LLVMName = NAME; + string LLVMName = llvm_name; int Size = size; - int Value = value; int nElem = 1; ValueType ElementType = ?; bit isOverloaded = false; @@ -31,22 +30,22 @@ class ValueType { bit isCheriCapability = false; } -class VTAny : ValueType<0, value> { +class VTAny : ValueType<0> { let isOverloaded = true; } -class VTInt - : ValueType { +class VTInt + : ValueType { let isInteger = true; } -class VTFP - : ValueType { +class VTFP + : ValueType { let isFP = true; } -class VTVec - : ValueType { +class VTVec + : ValueType { let nElem = nelem; let ElementType = elt; let isInteger = elt.isInteger; @@ -54,354 +53,348 @@ class VTVec let isVector = true; } -class VTScalableVec - : VTVec { +class VTScalableVec + : VTVec { let isScalable = true; } -class VTVecTup - : ValueType { +class VTVecTup + : ValueType { let NF = nf; let ElementType = dummy_elt; let isRISCVVecTuple = true; } -class VTCheriCapability : ValueType { +class VTCheriCapability : ValueType { let isCheriCapability = true; } defset list ValueTypes = { -def OtherVT : ValueType<0, 1> { // "Other" value - let LLVMName = "Other"; -} - -def i1 : VTInt<1, 2>; // One bit boolean value -def i2 : VTInt<2, 3>; // 2-bit integer value -def i4 : VTInt<4, 4>; // 4-bit integer value -def i8 : VTInt<8, 5>; // 8-bit integer value -def i16 : VTInt<16, 6>; // 16-bit integer value -def i32 : VTInt<32, 7>; // 32-bit integer value -def i64 : VTInt<64, 8>; // 64-bit integer value -def i128 : VTInt<128, 9>; // 128-bit integer value -def i256 : VTInt<256, 10>; // 256-bit integer value -def i512 : VTInt<512, 11>; // 512-bit integer value - -def bf16 : VTFP<16, 12>; // 16-bit brain floating point value -def f16 : VTFP<16, 13>; // 16-bit floating point value -def f32 : VTFP<32, 14>; // 32-bit floating point value -def f64 : VTFP<64, 15>; // 64-bit floating point value -def f80 
: VTFP<80, 16>; // 80-bit floating point value -def f128 : VTFP<128, 17>; // 128-bit floating point value -def ppcf128 : VTFP<128, 18>; // PPC 128-bit floating point value - -def v1i1 : VTVec<1, i1, 19>; // 1 x i1 vector value -def v2i1 : VTVec<2, i1, 20>; // 2 x i1 vector value -def v3i1 : VTVec<3, i1, 21>; // 3 x i1 vector value -def v4i1 : VTVec<4, i1, 22>; // 4 x i1 vector value -def v5i1 : VTVec<5, i1, 23>; // 5 x i1 vector value -def v6i1 : VTVec<6, i1, 24>; // 6 x i1 vector value -def v7i1 : VTVec<7, i1, 25>; // 7 x i1 vector value -def v8i1 : VTVec<8, i1, 26>; // 8 x i1 vector value -def v16i1 : VTVec<16, i1, 27>; // 16 x i1 vector value -def v32i1 : VTVec<32, i1, 28>; // 32 x i1 vector value -def v64i1 : VTVec<64, i1, 29>; // 64 x i1 vector value -def v128i1 : VTVec<128, i1, 30>; // 128 x i1 vector value -def v256i1 : VTVec<256, i1, 31>; // 256 x i1 vector value -def v512i1 : VTVec<512, i1, 32>; // 512 x i1 vector value -def v1024i1 : VTVec<1024, i1, 33>; // 1024 x i1 vector value -def v2048i1 : VTVec<2048, i1, 34>; // 2048 x i1 vector value -def v4096i1 : VTVec<4096, i1, 35>; // 4096 x i1 vector value - -def v128i2 : VTVec<128, i2, 36>; // 128 x i2 vector value -def v256i2 : VTVec<256, i2, 37>; // 256 x i2 vector value - -def v64i4 : VTVec<64, i4, 38>; // 64 x i4 vector value -def v128i4 : VTVec<128, i4, 39>; // 128 x i4 vector value - -def v1i8 : VTVec<1, i8, 40>; // 1 x i8 vector value -def v2i8 : VTVec<2, i8, 41>; // 2 x i8 vector value -def v3i8 : VTVec<3, i8, 42>; // 3 x i8 vector value -def v4i8 : VTVec<4, i8, 43>; // 4 x i8 vector value -def v5i8 : VTVec<5, i8, 44>; // 5 x i8 vector value -def v6i8 : VTVec<6, i8, 45>; // 6 x i8 vector value -def v7i8 : VTVec<7, i8, 46>; // 7 x i8 vector value -def v8i8 : VTVec<8, i8, 47>; // 8 x i8 vector value -def v16i8 : VTVec<16, i8, 48>; // 16 x i8 vector value -def v32i8 : VTVec<32, i8, 49>; // 32 x i8 vector value -def v64i8 : VTVec<64, i8, 50>; // 64 x i8 vector value -def v128i8 : VTVec<128, i8, 51>; // 
128 x i8 vector value -def v256i8 : VTVec<256, i8, 52>; // 256 x i8 vector value -def v512i8 : VTVec<512, i8, 53>; // 512 x i8 vector value -def v1024i8 : VTVec<1024, i8, 54>; // 1024 x i8 vector value - -def v1i16 : VTVec<1, i16, 55>; // 1 x i16 vector value -def v2i16 : VTVec<2, i16, 56>; // 2 x i16 vector value -def v3i16 : VTVec<3, i16, 57>; // 3 x i16 vector value -def v4i16 : VTVec<4, i16, 58>; // 4 x i16 vector value -def v5i16 : VTVec<5, i16, 59>; // 5 x i16 vector value -def v6i16 : VTVec<6, i16, 60>; // 6 x i16 vector value -def v7i16 : VTVec<7, i16, 61>; // 7 x i16 vector value -def v8i16 : VTVec<8, i16, 62>; // 8 x i16 vector value -def v16i16 : VTVec<16, i16, 63>; // 16 x i16 vector value -def v32i16 : VTVec<32, i16, 64>; // 32 x i16 vector value -def v64i16 : VTVec<64, i16, 65>; // 64 x i16 vector value -def v128i16 : VTVec<128, i16, 66>; // 128 x i16 vector value -def v256i16 : VTVec<256, i16, 67>; // 256 x i16 vector value -def v512i16 : VTVec<512, i16, 68>; // 512 x i16 vector value -def v4096i16 : VTVec<4096, i16, 69>; // 4096 x i16 vector value - -def v1i32 : VTVec<1, i32, 70>; // 1 x i32 vector value -def v2i32 : VTVec<2, i32, 71>; // 2 x i32 vector value -def v3i32 : VTVec<3, i32, 72>; // 3 x i32 vector value -def v4i32 : VTVec<4, i32, 73>; // 4 x i32 vector value -def v5i32 : VTVec<5, i32, 74>; // 5 x i32 vector value -def v6i32 : VTVec<6, i32, 75>; // 6 x i32 vector value -def v7i32 : VTVec<7, i32, 76>; // 7 x i32 vector value -def v8i32 : VTVec<8, i32, 77>; // 8 x i32 vector value -def v9i32 : VTVec<9, i32, 78>; // 9 x i32 vector value -def v10i32 : VTVec<10, i32, 79>; // 10 x i32 vector value -def v11i32 : VTVec<11, i32, 80>; // 11 x i32 vector value -def v12i32 : VTVec<12, i32, 81>; // 12 x i32 vector value -def v16i32 : VTVec<16, i32, 82>; // 16 x i32 vector value -def v32i32 : VTVec<32, i32, 83>; // 32 x i32 vector value -def v64i32 : VTVec<64, i32, 84>; // 64 x i32 vector value -def v128i32 : VTVec<128, i32, 85>; // 128 x i32 vector 
value -def v256i32 : VTVec<256, i32, 86>; // 256 x i32 vector value -def v512i32 : VTVec<512, i32, 87>; // 512 x i32 vector value -def v1024i32 : VTVec<1024, i32, 88>; // 1024 x i32 vector value -def v2048i32 : VTVec<2048, i32, 89>; // 2048 x i32 vector value -def v4096i32 : VTVec<4096, i32, 90>; // 4096 x i32 vector value - -def v1i64 : VTVec<1, i64, 91>; // 1 x i64 vector value -def v2i64 : VTVec<2, i64, 92>; // 2 x i64 vector value -def v3i64 : VTVec<3, i64, 93>; // 3 x i64 vector value -def v4i64 : VTVec<4, i64, 94>; // 4 x i64 vector value -def v8i64 : VTVec<8, i64, 95>; // 8 x i64 vector value -def v16i64 : VTVec<16, i64, 96>; // 16 x i64 vector value -def v32i64 : VTVec<32, i64, 97>; // 32 x i64 vector value -def v64i64 : VTVec<64, i64, 98>; // 64 x i64 vector value -def v128i64 : VTVec<128, i64, 99>; // 128 x i64 vector value -def v256i64 : VTVec<256, i64, 100>; // 256 x i64 vector value - -def v1i128 : VTVec<1, i128, 101>; // 1 x i128 vector value - -def v1f16 : VTVec<1, f16, 102>; // 1 x f16 vector value -def v2f16 : VTVec<2, f16, 103>; // 2 x f16 vector value -def v3f16 : VTVec<3, f16, 104>; // 3 x f16 vector value -def v4f16 : VTVec<4, f16, 105>; // 4 x f16 vector value -def v5f16 : VTVec<5, f16, 106>; // 5 x f16 vector value -def v6f16 : VTVec<6, f16, 107>; // 6 x f16 vector value -def v7f16 : VTVec<7, f16, 108>; // 7 x f16 vector value -def v8f16 : VTVec<8, f16, 109>; // 8 x f16 vector value -def v16f16 : VTVec<16, f16, 110>; // 16 x f16 vector value -def v32f16 : VTVec<32, f16, 111>; // 32 x f16 vector value -def v64f16 : VTVec<64, f16, 112>; // 64 x f16 vector value -def v128f16 : VTVec<128, f16, 113>; // 128 x f16 vector value -def v256f16 : VTVec<256, f16, 114>; // 256 x f16 vector value -def v512f16 : VTVec<512, f16, 115>; // 512 x f16 vector value -def v4096f16 : VTVec<4096, f16, 116>; // 4096 x f16 vector value - -def v1bf16 : VTVec<1, bf16, 117>; // 1 x bf16 vector value -def v2bf16 : VTVec<2, bf16, 118>; // 2 x bf16 vector value -def v3bf16 : 
VTVec<3, bf16, 119>; // 3 x bf16 vector value -def v4bf16 : VTVec<4, bf16, 120>; // 4 x bf16 vector value -def v8bf16 : VTVec<8, bf16, 121>; // 8 x bf16 vector value -def v16bf16 : VTVec<16, bf16, 122>; // 16 x bf16 vector value -def v32bf16 : VTVec<32, bf16, 123>; // 32 x bf16 vector value -def v64bf16 : VTVec<64, bf16, 124>; // 64 x bf16 vector value -def v128bf16 : VTVec<128, bf16, 125>; // 128 x bf16 vector value -def v4096bf16 : VTVec<4096, bf16, 126>; // 4096 x bf16 vector value - -def v1f32 : VTVec<1, f32, 127>; // 1 x f32 vector value -def v2f32 : VTVec<2, f32, 128>; // 2 x f32 vector value -def v3f32 : VTVec<3, f32, 129>; // 3 x f32 vector value -def v4f32 : VTVec<4, f32, 130>; // 4 x f32 vector value -def v5f32 : VTVec<5, f32, 131>; // 5 x f32 vector value -def v6f32 : VTVec<6, f32, 132>; // 6 x f32 vector value -def v7f32 : VTVec<7, f32, 133>; // 7 x f32 vector value -def v8f32 : VTVec<8, f32, 134>; // 8 x f32 vector value -def v9f32 : VTVec<9, f32, 135>; // 9 x f32 vector value -def v10f32 : VTVec<10, f32, 136>; // 10 x f32 vector value -def v11f32 : VTVec<11, f32, 137>; // 11 x f32 vector value -def v12f32 : VTVec<12, f32, 138>; // 12 x f32 vector value -def v16f32 : VTVec<16, f32, 139>; // 16 x f32 vector value -def v32f32 : VTVec<32, f32, 140>; // 32 x f32 vector value -def v64f32 : VTVec<64, f32, 141>; // 64 x f32 vector value -def v128f32 : VTVec<128, f32, 142>; // 128 x f32 vector value -def v256f32 : VTVec<256, f32, 143>; // 256 x f32 vector value -def v512f32 : VTVec<512, f32, 144>; // 512 x f32 vector value -def v1024f32 : VTVec<1024, f32, 145>; // 1024 x f32 vector value -def v2048f32 : VTVec<2048, f32, 146>; // 2048 x f32 vector value - -def v1f64 : VTVec<1, f64, 147>; // 1 x f64 vector value -def v2f64 : VTVec<2, f64, 148>; // 2 x f64 vector value -def v3f64 : VTVec<3, f64, 149>; // 3 x f64 vector value -def v4f64 : VTVec<4, f64, 150>; // 4 x f64 vector value -def v8f64 : VTVec<8, f64, 151>; // 8 x f64 vector value -def v16f64 : VTVec<16, 
f64, 152>; // 16 x f64 vector value -def v32f64 : VTVec<32, f64, 153>; // 32 x f64 vector value -def v64f64 : VTVec<64, f64, 154>; // 64 x f64 vector value -def v128f64 : VTVec<128, f64, 155>; // 128 x f64 vector value -def v256f64 : VTVec<256, f64, 156>; // 256 x f64 vector value - -def nxv1i1 : VTScalableVec<1, i1, 157>; // n x 1 x i1 vector value -def nxv2i1 : VTScalableVec<2, i1, 158>; // n x 2 x i1 vector value -def nxv4i1 : VTScalableVec<4, i1, 159>; // n x 4 x i1 vector value -def nxv8i1 : VTScalableVec<8, i1, 160>; // n x 8 x i1 vector value -def nxv16i1 : VTScalableVec<16, i1, 161>; // n x 16 x i1 vector value -def nxv32i1 : VTScalableVec<32, i1, 162>; // n x 32 x i1 vector value -def nxv64i1 : VTScalableVec<64, i1, 163>; // n x 64 x i1 vector value - -def nxv1i8 : VTScalableVec<1, i8, 164>; // n x 1 x i8 vector value -def nxv2i8 : VTScalableVec<2, i8, 165>; // n x 2 x i8 vector value -def nxv4i8 : VTScalableVec<4, i8, 166>; // n x 4 x i8 vector value -def nxv8i8 : VTScalableVec<8, i8, 167>; // n x 8 x i8 vector value -def nxv16i8 : VTScalableVec<16, i8, 168>; // n x 16 x i8 vector value -def nxv32i8 : VTScalableVec<32, i8, 169>; // n x 32 x i8 vector value -def nxv64i8 : VTScalableVec<64, i8, 170>; // n x 64 x i8 vector value - -def nxv1i16 : VTScalableVec<1, i16, 171>; // n x 1 x i16 vector value -def nxv2i16 : VTScalableVec<2, i16, 172>; // n x 2 x i16 vector value -def nxv4i16 : VTScalableVec<4, i16, 173>; // n x 4 x i16 vector value -def nxv8i16 : VTScalableVec<8, i16, 174>; // n x 8 x i16 vector value -def nxv16i16 : VTScalableVec<16, i16, 175>; // n x 16 x i16 vector value -def nxv32i16 : VTScalableVec<32, i16, 176>; // n x 32 x i16 vector value - -def nxv1i32 : VTScalableVec<1, i32, 177>; // n x 1 x i32 vector value -def nxv2i32 : VTScalableVec<2, i32, 178>; // n x 2 x i32 vector value -def nxv4i32 : VTScalableVec<4, i32, 179>; // n x 4 x i32 vector value -def nxv8i32 : VTScalableVec<8, i32, 180>; // n x 8 x i32 vector value -def nxv16i32 : 
VTScalableVec<16, i32, 181>; // n x 16 x i32 vector value -def nxv32i32 : VTScalableVec<32, i32, 182>; // n x 32 x i32 vector value - -def nxv1i64 : VTScalableVec<1, i64, 183>; // n x 1 x i64 vector value -def nxv2i64 : VTScalableVec<2, i64, 184>; // n x 2 x i64 vector value -def nxv4i64 : VTScalableVec<4, i64, 185>; // n x 4 x i64 vector value -def nxv8i64 : VTScalableVec<8, i64, 186>; // n x 8 x i64 vector value -def nxv16i64 : VTScalableVec<16, i64, 187>; // n x 16 x i64 vector value -def nxv32i64 : VTScalableVec<32, i64, 188>; // n x 32 x i64 vector value - -def nxv1f16 : VTScalableVec<1, f16, 189>; // n x 1 x f16 vector value -def nxv2f16 : VTScalableVec<2, f16, 190>; // n x 2 x f16 vector value -def nxv4f16 : VTScalableVec<4, f16, 191>; // n x 4 x f16 vector value -def nxv8f16 : VTScalableVec<8, f16, 192>; // n x 8 x f16 vector value -def nxv16f16 : VTScalableVec<16, f16, 193>; // n x 16 x f16 vector value -def nxv32f16 : VTScalableVec<32, f16, 194>; // n x 32 x f16 vector value - -def nxv1bf16 : VTScalableVec<1, bf16, 195>; // n x 1 x bf16 vector value -def nxv2bf16 : VTScalableVec<2, bf16, 196>; // n x 2 x bf16 vector value -def nxv4bf16 : VTScalableVec<4, bf16, 197>; // n x 4 x bf16 vector value -def nxv8bf16 : VTScalableVec<8, bf16, 198>; // n x 8 x bf16 vector value -def nxv16bf16 : VTScalableVec<16, bf16, 199>; // n x 16 x bf16 vector value -def nxv32bf16 : VTScalableVec<32, bf16, 200>; // n x 32 x bf16 vector value - -def nxv1f32 : VTScalableVec<1, f32, 201>; // n x 1 x f32 vector value -def nxv2f32 : VTScalableVec<2, f32, 202>; // n x 2 x f32 vector value -def nxv4f32 : VTScalableVec<4, f32, 203>; // n x 4 x f32 vector value -def nxv8f32 : VTScalableVec<8, f32, 204>; // n x 8 x f32 vector value -def nxv16f32 : VTScalableVec<16, f32, 205>; // n x 16 x f32 vector value - -def nxv1f64 : VTScalableVec<1, f64, 206>; // n x 1 x f64 vector value -def nxv2f64 : VTScalableVec<2, f64, 207>; // n x 2 x f64 vector value -def nxv4f64 : VTScalableVec<4, f64, 208>; 
// n x 4 x f64 vector value -def nxv8f64 : VTScalableVec<8, f64, 209>; // n x 8 x f64 vector value +def OtherVT : ValueType<0, "Other">; // "Other" value + +def i1 : VTInt<1>; // One bit boolean value +def i2 : VTInt<2>; // 2-bit integer value +def i4 : VTInt<4>; // 4-bit integer value +def i8 : VTInt<8>; // 8-bit integer value +def i16 : VTInt<16>; // 16-bit integer value +def i32 : VTInt<32>; // 32-bit integer value +def i64 : VTInt<64>; // 64-bit integer value +def i128 : VTInt<128>; // 128-bit integer value +def i256 : VTInt<256>; // 256-bit integer value +def i512 : VTInt<512>; // 512-bit integer value + +def bf16 : VTFP<16>; // 16-bit brain floating point value +def f16 : VTFP<16>; // 16-bit floating point value +def f32 : VTFP<32>; // 32-bit floating point value +def f64 : VTFP<64>; // 64-bit floating point value +def f80 : VTFP<80>; // 80-bit floating point value +def f128 : VTFP<128>; // 128-bit floating point value +def ppcf128 : VTFP<128>; // PPC 128-bit floating point value + +def v1i1 : VTVec<1, i1>; // 1 x i1 vector value +def v2i1 : VTVec<2, i1>; // 2 x i1 vector value +def v3i1 : VTVec<3, i1>; // 3 x i1 vector value +def v4i1 : VTVec<4, i1>; // 4 x i1 vector value +def v5i1 : VTVec<5, i1>; // 5 x i1 vector value +def v6i1 : VTVec<6, i1>; // 6 x i1 vector value +def v7i1 : VTVec<7, i1>; // 7 x i1 vector value +def v8i1 : VTVec<8, i1>; // 8 x i1 vector value +def v16i1 : VTVec<16, i1>; // 16 x i1 vector value +def v32i1 : VTVec<32, i1>; // 32 x i1 vector value +def v64i1 : VTVec<64, i1>; // 64 x i1 vector value +def v128i1 : VTVec<128, i1>; // 128 x i1 vector value +def v256i1 : VTVec<256, i1>; // 256 x i1 vector value +def v512i1 : VTVec<512, i1>; // 512 x i1 vector value +def v1024i1 : VTVec<1024, i1>; // 1024 x i1 vector value +def v2048i1 : VTVec<2048, i1>; // 2048 x i1 vector value +def v4096i1 : VTVec<4096, i1>; // 4096 x i1 vector value + +def v128i2 : VTVec<128, i2>; // 128 x i2 vector value +def v256i2 : VTVec<256, i2>; // 256 x i2 vector 
value + +def v64i4 : VTVec<64, i4>; // 64 x i4 vector value +def v128i4 : VTVec<128, i4>; // 128 x i4 vector value + +def v1i8 : VTVec<1, i8>; // 1 x i8 vector value +def v2i8 : VTVec<2, i8>; // 2 x i8 vector value +def v3i8 : VTVec<3, i8>; // 3 x i8 vector value +def v4i8 : VTVec<4, i8>; // 4 x i8 vector value +def v5i8 : VTVec<5, i8>; // 5 x i8 vector value +def v6i8 : VTVec<6, i8>; // 6 x i8 vector value +def v7i8 : VTVec<7, i8>; // 7 x i8 vector value +def v8i8 : VTVec<8, i8>; // 8 x i8 vector value +def v16i8 : VTVec<16, i8>; // 16 x i8 vector value +def v32i8 : VTVec<32, i8>; // 32 x i8 vector value +def v64i8 : VTVec<64, i8>; // 64 x i8 vector value +def v128i8 : VTVec<128, i8>; // 128 x i8 vector value +def v256i8 : VTVec<256, i8>; // 256 x i8 vector value +def v512i8 : VTVec<512, i8>; // 512 x i8 vector value +def v1024i8 : VTVec<1024, i8>; // 1024 x i8 vector value + +def v1i16 : VTVec<1, i16>; // 1 x i16 vector value +def v2i16 : VTVec<2, i16>; // 2 x i16 vector value +def v3i16 : VTVec<3, i16>; // 3 x i16 vector value +def v4i16 : VTVec<4, i16>; // 4 x i16 vector value +def v5i16 : VTVec<5, i16>; // 5 x i16 vector value +def v6i16 : VTVec<6, i16>; // 6 x i16 vector value +def v7i16 : VTVec<7, i16>; // 7 x i16 vector value +def v8i16 : VTVec<8, i16>; // 8 x i16 vector value +def v16i16 : VTVec<16, i16>; // 16 x i16 vector value +def v32i16 : VTVec<32, i16>; // 32 x i16 vector value +def v64i16 : VTVec<64, i16>; // 64 x i16 vector value +def v128i16 : VTVec<128, i16>; // 128 x i16 vector value +def v256i16 : VTVec<256, i16>; // 256 x i16 vector value +def v512i16 : VTVec<512, i16>; // 512 x i16 vector value +def v4096i16 : VTVec<4096, i16>; // 4096 x i16 vector value + +def v1i32 : VTVec<1, i32>; // 1 x i32 vector value +def v2i32 : VTVec<2, i32>; // 2 x i32 vector value +def v3i32 : VTVec<3, i32>; // 3 x i32 vector value +def v4i32 : VTVec<4, i32>; // 4 x i32 vector value +def v5i32 : VTVec<5, i32>; // 5 x i32 vector value +def v6i32 : VTVec<6, i32>; // 
6 x i32 vector value +def v7i32 : VTVec<7, i32>; // 7 x i32 vector value +def v8i32 : VTVec<8, i32>; // 8 x i32 vector value +def v9i32 : VTVec<9, i32>; // 9 x i32 vector value +def v10i32 : VTVec<10, i32>; // 10 x i32 vector value +def v11i32 : VTVec<11, i32>; // 11 x i32 vector value +def v12i32 : VTVec<12, i32>; // 12 x i32 vector value +def v16i32 : VTVec<16, i32>; // 16 x i32 vector value +def v32i32 : VTVec<32, i32>; // 32 x i32 vector value +def v64i32 : VTVec<64, i32>; // 64 x i32 vector value +def v128i32 : VTVec<128, i32>; // 128 x i32 vector value +def v256i32 : VTVec<256, i32>; // 256 x i32 vector value +def v512i32 : VTVec<512, i32>; // 512 x i32 vector value +def v1024i32 : VTVec<1024, i32>; // 1024 x i32 vector value +def v2048i32 : VTVec<2048, i32>; // 2048 x i32 vector value +def v4096i32 : VTVec<4096, i32>; // 4096 x i32 vector value + +def v1i64 : VTVec<1, i64>; // 1 x i64 vector value +def v2i64 : VTVec<2, i64>; // 2 x i64 vector value +def v3i64 : VTVec<3, i64>; // 3 x i64 vector value +def v4i64 : VTVec<4, i64>; // 4 x i64 vector value +def v8i64 : VTVec<8, i64>; // 8 x i64 vector value +def v16i64 : VTVec<16, i64>; // 16 x i64 vector value +def v32i64 : VTVec<32, i64>; // 32 x i64 vector value +def v64i64 : VTVec<64, i64>; // 64 x i64 vector value +def v128i64 : VTVec<128, i64>; // 128 x i64 vector value +def v256i64 : VTVec<256, i64>; // 256 x i64 vector value + +def v1i128 : VTVec<1, i128>; // 1 x i128 vector value + +def v1f16 : VTVec<1, f16>; // 1 x f16 vector value +def v2f16 : VTVec<2, f16>; // 2 x f16 vector value +def v3f16 : VTVec<3, f16>; // 3 x f16 vector value +def v4f16 : VTVec<4, f16>; // 4 x f16 vector value +def v5f16 : VTVec<5, f16>; // 5 x f16 vector value +def v6f16 : VTVec<6, f16>; // 6 x f16 vector value +def v7f16 : VTVec<7, f16>; // 7 x f16 vector value +def v8f16 : VTVec<8, f16>; // 8 x f16 vector value +def v16f16 : VTVec<16, f16>; // 16 x f16 vector value +def v32f16 : VTVec<32, f16>; // 32 x f16 vector value +def 
v64f16 : VTVec<64, f16>; // 64 x f16 vector value +def v128f16 : VTVec<128, f16>; // 128 x f16 vector value +def v256f16 : VTVec<256, f16>; // 256 x f16 vector value +def v512f16 : VTVec<512, f16>; // 512 x f16 vector value +def v4096f16 : VTVec<4096, f16>; // 4096 x f16 vector value + +def v1bf16 : VTVec<1, bf16>; // 1 x bf16 vector value +def v2bf16 : VTVec<2, bf16>; // 2 x bf16 vector value +def v3bf16 : VTVec<3, bf16>; // 3 x bf16 vector value +def v4bf16 : VTVec<4, bf16>; // 4 x bf16 vector value +def v8bf16 : VTVec<8, bf16>; // 8 x bf16 vector value +def v16bf16 : VTVec<16, bf16>; // 16 x bf16 vector value +def v32bf16 : VTVec<32, bf16>; // 32 x bf16 vector value +def v64bf16 : VTVec<64, bf16>; // 64 x bf16 vector value +def v128bf16 : VTVec<128, bf16>; // 128 x bf16 vector value +def v4096bf16 : VTVec<4096, bf16>; // 4096 x bf16 vector value + +def v1f32 : VTVec<1, f32>; // 1 x f32 vector value +def v2f32 : VTVec<2, f32>; // 2 x f32 vector value +def v3f32 : VTVec<3, f32>; // 3 x f32 vector value +def v4f32 : VTVec<4, f32>; // 4 x f32 vector value +def v5f32 : VTVec<5, f32>; // 5 x f32 vector value +def v6f32 : VTVec<6, f32>; // 6 x f32 vector value +def v7f32 : VTVec<7, f32>; // 7 x f32 vector value +def v8f32 : VTVec<8, f32>; // 8 x f32 vector value +def v9f32 : VTVec<9, f32>; // 9 x f32 vector value +def v10f32 : VTVec<10, f32>; // 10 x f32 vector value +def v11f32 : VTVec<11, f32>; // 11 x f32 vector value +def v12f32 : VTVec<12, f32>; // 12 x f32 vector value +def v16f32 : VTVec<16, f32>; // 16 x f32 vector value +def v32f32 : VTVec<32, f32>; // 32 x f32 vector value +def v64f32 : VTVec<64, f32>; // 64 x f32 vector value +def v128f32 : VTVec<128, f32>; // 128 x f32 vector value +def v256f32 : VTVec<256, f32>; // 256 x f32 vector value +def v512f32 : VTVec<512, f32>; // 512 x f32 vector value +def v1024f32 : VTVec<1024, f32>; // 1024 x f32 vector value +def v2048f32 : VTVec<2048, f32>; // 2048 x f32 vector value + +def v1f64 : VTVec<1, f64>; // 1 x f64 
vector value +def v2f64 : VTVec<2, f64>; // 2 x f64 vector value +def v3f64 : VTVec<3, f64>; // 3 x f64 vector value +def v4f64 : VTVec<4, f64>; // 4 x f64 vector value +def v8f64 : VTVec<8, f64>; // 8 x f64 vector value +def v16f64 : VTVec<16, f64>; // 16 x f64 vector value +def v32f64 : VTVec<32, f64>; // 32 x f64 vector value +def v64f64 : VTVec<64, f64>; // 64 x f64 vector value +def v128f64 : VTVec<128, f64>; // 128 x f64 vector value +def v256f64 : VTVec<256, f64>; // 256 x f64 vector value + +def nxv1i1 : VTScalableVec<1, i1>; // n x 1 x i1 vector value +def nxv2i1 : VTScalableVec<2, i1>; // n x 2 x i1 vector value +def nxv4i1 : VTScalableVec<4, i1>; // n x 4 x i1 vector value +def nxv8i1 : VTScalableVec<8, i1>; // n x 8 x i1 vector value +def nxv16i1 : VTScalableVec<16, i1>; // n x 16 x i1 vector value +def nxv32i1 : VTScalableVec<32, i1>; // n x 32 x i1 vector value +def nxv64i1 : VTScalableVec<64, i1>; // n x 64 x i1 vector value + +def nxv1i8 : VTScalableVec<1, i8>; // n x 1 x i8 vector value +def nxv2i8 : VTScalableVec<2, i8>; // n x 2 x i8 vector value +def nxv4i8 : VTScalableVec<4, i8>; // n x 4 x i8 vector value +def nxv8i8 : VTScalableVec<8, i8>; // n x 8 x i8 vector value +def nxv16i8 : VTScalableVec<16, i8>; // n x 16 x i8 vector value +def nxv32i8 : VTScalableVec<32, i8>; // n x 32 x i8 vector value +def nxv64i8 : VTScalableVec<64, i8>; // n x 64 x i8 vector value + +def nxv1i16 : VTScalableVec<1, i16>; // n x 1 x i16 vector value +def nxv2i16 : VTScalableVec<2, i16>; // n x 2 x i16 vector value +def nxv4i16 : VTScalableVec<4, i16>; // n x 4 x i16 vector value +def nxv8i16 : VTScalableVec<8, i16>; // n x 8 x i16 vector value +def nxv16i16 : VTScalableVec<16, i16>; // n x 16 x i16 vector value +def nxv32i16 : VTScalableVec<32, i16>; // n x 32 x i16 vector value + +def nxv1i32 : VTScalableVec<1, i32>; // n x 1 x i32 vector value +def nxv2i32 : VTScalableVec<2, i32>; // n x 2 x i32 vector value +def nxv4i32 : VTScalableVec<4, i32>; // n x 4 x i32 
vector value +def nxv8i32 : VTScalableVec<8, i32>; // n x 8 x i32 vector value +def nxv16i32 : VTScalableVec<16, i32>; // n x 16 x i32 vector value +def nxv32i32 : VTScalableVec<32, i32>; // n x 32 x i32 vector value + +def nxv1i64 : VTScalableVec<1, i64>; // n x 1 x i64 vector value +def nxv2i64 : VTScalableVec<2, i64>; // n x 2 x i64 vector value +def nxv4i64 : VTScalableVec<4, i64>; // n x 4 x i64 vector value +def nxv8i64 : VTScalableVec<8, i64>; // n x 8 x i64 vector value +def nxv16i64 : VTScalableVec<16, i64>; // n x 16 x i64 vector value +def nxv32i64 : VTScalableVec<32, i64>; // n x 32 x i64 vector value + +def nxv1f16 : VTScalableVec<1, f16>; // n x 1 x f16 vector value +def nxv2f16 : VTScalableVec<2, f16>; // n x 2 x f16 vector value +def nxv4f16 : VTScalableVec<4, f16>; // n x 4 x f16 vector value +def nxv8f16 : VTScalableVec<8, f16>; // n x 8 x f16 vector value +def nxv16f16 : VTScalableVec<16, f16>; // n x 16 x f16 vector value +def nxv32f16 : VTScalableVec<32, f16>; // n x 32 x f16 vector value + +def nxv1bf16 : VTScalableVec<1, bf16>; // n x 1 x bf16 vector value +def nxv2bf16 : VTScalableVec<2, bf16>; // n x 2 x bf16 vector value +def nxv4bf16 : VTScalableVec<4, bf16>; // n x 4 x bf16 vector value +def nxv8bf16 : VTScalableVec<8, bf16>; // n x 8 x bf16 vector value +def nxv16bf16 : VTScalableVec<16, bf16>; // n x 16 x bf16 vector value +def nxv32bf16 : VTScalableVec<32, bf16>; // n x 32 x bf16 vector value + +def nxv1f32 : VTScalableVec<1, f32>; // n x 1 x f32 vector value +def nxv2f32 : VTScalableVec<2, f32>; // n x 2 x f32 vector value +def nxv4f32 : VTScalableVec<4, f32>; // n x 4 x f32 vector value +def nxv8f32 : VTScalableVec<8, f32>; // n x 8 x f32 vector value +def nxv16f32 : VTScalableVec<16, f32>; // n x 16 x f32 vector value + +def nxv1f64 : VTScalableVec<1, f64>; // n x 1 x f64 vector value +def nxv2f64 : VTScalableVec<2, f64>; // n x 2 x f64 vector value +def nxv4f64 : VTScalableVec<4, f64>; // n x 4 x f64 vector value +def nxv8f64 : 
VTScalableVec<8, f64>; // n x 8 x f64 vector value // Sz = NF * MinNumElts * 8(bits) -def riscv_nxv1i8x2 : VTVecTup<16, 2, i8, 210>; // RISCV vector tuple(min_num_elts=1, nf=2) -def riscv_nxv1i8x3 : VTVecTup<24, 3, i8, 211>; // RISCV vector tuple(min_num_elts=1, nf=3) -def riscv_nxv1i8x4 : VTVecTup<32, 4, i8, 212>; // RISCV vector tuple(min_num_elts=1, nf=4) -def riscv_nxv1i8x5 : VTVecTup<40, 5, i8, 213>; // RISCV vector tuple(min_num_elts=1, nf=5) -def riscv_nxv1i8x6 : VTVecTup<48, 6, i8, 214>; // RISCV vector tuple(min_num_elts=1, nf=6) -def riscv_nxv1i8x7 : VTVecTup<56, 7, i8, 215>; // RISCV vector tuple(min_num_elts=1, nf=7) -def riscv_nxv1i8x8 : VTVecTup<64, 8, i8, 216>; // RISCV vector tuple(min_num_elts=1, nf=8) -def riscv_nxv2i8x2 : VTVecTup<32, 2, i8, 217>; // RISCV vector tuple(min_num_elts=2, nf=2) -def riscv_nxv2i8x3 : VTVecTup<48, 3, i8, 218>; // RISCV vector tuple(min_num_elts=2, nf=3) -def riscv_nxv2i8x4 : VTVecTup<64, 4, i8, 219>; // RISCV vector tuple(min_num_elts=2, nf=4) -def riscv_nxv2i8x5 : VTVecTup<80, 5, i8, 220>; // RISCV vector tuple(min_num_elts=2, nf=5) -def riscv_nxv2i8x6 : VTVecTup<96, 6, i8, 221>; // RISCV vector tuple(min_num_elts=2, nf=6) -def riscv_nxv2i8x7 : VTVecTup<112, 7, i8, 222>; // RISCV vector tuple(min_num_elts=2, nf=7) -def riscv_nxv2i8x8 : VTVecTup<128, 8, i8, 223>; // RISCV vector tuple(min_num_elts=2, nf=8) -def riscv_nxv4i8x2 : VTVecTup<64, 2, i8, 224>; // RISCV vector tuple(min_num_elts=4, nf=2) -def riscv_nxv4i8x3 : VTVecTup<96, 3, i8, 225>; // RISCV vector tuple(min_num_elts=4, nf=3) -def riscv_nxv4i8x4 : VTVecTup<128, 4, i8, 226>; // RISCV vector tuple(min_num_elts=4, nf=4) -def riscv_nxv4i8x5 : VTVecTup<160, 5, i8, 227>; // RISCV vector tuple(min_num_elts=4, nf=5) -def riscv_nxv4i8x6 : VTVecTup<192, 6, i8, 228>; // RISCV vector tuple(min_num_elts=4, nf=6) -def riscv_nxv4i8x7 : VTVecTup<224, 7, i8, 229>; // RISCV vector tuple(min_num_elts=4, nf=7) -def riscv_nxv4i8x8 : VTVecTup<256, 8, i8, 230>; // RISCV vector 
tuple(min_num_elts=4, nf=8) -def riscv_nxv8i8x2 : VTVecTup<128, 2, i8, 231>; // RISCV vector tuple(min_num_elts=8, nf=2) -def riscv_nxv8i8x3 : VTVecTup<192, 3, i8, 232>; // RISCV vector tuple(min_num_elts=8, nf=3) -def riscv_nxv8i8x4 : VTVecTup<256, 4, i8, 233>; // RISCV vector tuple(min_num_elts=8, nf=4) -def riscv_nxv8i8x5 : VTVecTup<320, 5, i8, 234>; // RISCV vector tuple(min_num_elts=8, nf=5) -def riscv_nxv8i8x6 : VTVecTup<384, 6, i8, 235>; // RISCV vector tuple(min_num_elts=8, nf=6) -def riscv_nxv8i8x7 : VTVecTup<448, 7, i8, 236>; // RISCV vector tuple(min_num_elts=8, nf=7) -def riscv_nxv8i8x8 : VTVecTup<512, 8, i8, 237>; // RISCV vector tuple(min_num_elts=8, nf=8) -def riscv_nxv16i8x2 : VTVecTup<256, 2, i8, 238>; // RISCV vector tuple(min_num_elts=16, nf=2) -def riscv_nxv16i8x3 : VTVecTup<384, 3, i8, 239>; // RISCV vector tuple(min_num_elts=16, nf=3) -def riscv_nxv16i8x4 : VTVecTup<512, 4, i8, 240>; // RISCV vector tuple(min_num_elts=16, nf=4) -def riscv_nxv32i8x2 : VTVecTup<512, 2, i8, 241>; // RISCV vector tuple(min_num_elts=32, nf=2) - -def x86mmx : ValueType<64, 242>; // X86 MMX value -def Glue : ValueType<0, 243>; // Pre-RA sched glue -def isVoid : ValueType<0, 244>; // Produces no value -def untyped : ValueType<8, 245> { // Produces an untyped value - let LLVMName = "Untyped"; -} -def funcref : ValueType<0, 246>; // WebAssembly's funcref type -def externref : ValueType<0, 247>; // WebAssembly's externref type -def exnref : ValueType<0, 248>; // WebAssembly's exnref type -def x86amx : ValueType<8192, 249>; // X86 AMX value -def i64x8 : ValueType<512, 250>; // 8 Consecutive GPRs (AArch64) +def riscv_nxv1i8x2 : VTVecTup<16, 2, i8>; // RISCV vector tuple(min_num_elts=1, nf=2) +def riscv_nxv1i8x3 : VTVecTup<24, 3, i8>; // RISCV vector tuple(min_num_elts=1, nf=3) +def riscv_nxv1i8x4 : VTVecTup<32, 4, i8>; // RISCV vector tuple(min_num_elts=1, nf=4) +def riscv_nxv1i8x5 : VTVecTup<40, 5, i8>; // RISCV vector tuple(min_num_elts=1, nf=5) +def riscv_nxv1i8x6 : 
VTVecTup<48, 6, i8>; // RISCV vector tuple(min_num_elts=1, nf=6) +def riscv_nxv1i8x7 : VTVecTup<56, 7, i8>; // RISCV vector tuple(min_num_elts=1, nf=7) +def riscv_nxv1i8x8 : VTVecTup<64, 8, i8>; // RISCV vector tuple(min_num_elts=1, nf=8) +def riscv_nxv2i8x2 : VTVecTup<32, 2, i8>; // RISCV vector tuple(min_num_elts=2, nf=2) +def riscv_nxv2i8x3 : VTVecTup<48, 3, i8>; // RISCV vector tuple(min_num_elts=2, nf=3) +def riscv_nxv2i8x4 : VTVecTup<64, 4, i8>; // RISCV vector tuple(min_num_elts=2, nf=4) +def riscv_nxv2i8x5 : VTVecTup<80, 5, i8>; // RISCV vector tuple(min_num_elts=2, nf=5) +def riscv_nxv2i8x6 : VTVecTup<96, 6, i8>; // RISCV vector tuple(min_num_elts=2, nf=6) +def riscv_nxv2i8x7 : VTVecTup<112, 7, i8>; // RISCV vector tuple(min_num_elts=2, nf=7) +def riscv_nxv2i8x8 : VTVecTup<128, 8, i8>; // RISCV vector tuple(min_num_elts=2, nf=8) +def riscv_nxv4i8x2 : VTVecTup<64, 2, i8>; // RISCV vector tuple(min_num_elts=4, nf=2) +def riscv_nxv4i8x3 : VTVecTup<96, 3, i8>; // RISCV vector tuple(min_num_elts=4, nf=3) +def riscv_nxv4i8x4 : VTVecTup<128, 4, i8>; // RISCV vector tuple(min_num_elts=4, nf=4) +def riscv_nxv4i8x5 : VTVecTup<160, 5, i8>; // RISCV vector tuple(min_num_elts=4, nf=5) +def riscv_nxv4i8x6 : VTVecTup<192, 6, i8>; // RISCV vector tuple(min_num_elts=4, nf=6) +def riscv_nxv4i8x7 : VTVecTup<224, 7, i8>; // RISCV vector tuple(min_num_elts=4, nf=7) +def riscv_nxv4i8x8 : VTVecTup<256, 8, i8>; // RISCV vector tuple(min_num_elts=4, nf=8) +def riscv_nxv8i8x2 : VTVecTup<128, 2, i8>; // RISCV vector tuple(min_num_elts=8, nf=2) +def riscv_nxv8i8x3 : VTVecTup<192, 3, i8>; // RISCV vector tuple(min_num_elts=8, nf=3) +def riscv_nxv8i8x4 : VTVecTup<256, 4, i8>; // RISCV vector tuple(min_num_elts=8, nf=4) +def riscv_nxv8i8x5 : VTVecTup<320, 5, i8>; // RISCV vector tuple(min_num_elts=8, nf=5) +def riscv_nxv8i8x6 : VTVecTup<384, 6, i8>; // RISCV vector tuple(min_num_elts=8, nf=6) +def riscv_nxv8i8x7 : VTVecTup<448, 7, i8>; // RISCV vector tuple(min_num_elts=8, nf=7) +def 
riscv_nxv8i8x8 : VTVecTup<512, 8, i8>; // RISCV vector tuple(min_num_elts=8, nf=8) +def riscv_nxv16i8x2 : VTVecTup<256, 2, i8>; // RISCV vector tuple(min_num_elts=16, nf=2) +def riscv_nxv16i8x3 : VTVecTup<384, 3, i8>; // RISCV vector tuple(min_num_elts=16, nf=3) +def riscv_nxv16i8x4 : VTVecTup<512, 4, i8>; // RISCV vector tuple(min_num_elts=16, nf=4) +def riscv_nxv32i8x2 : VTVecTup<512, 2, i8>; // RISCV vector tuple(min_num_elts=32, nf=2) + +def x86mmx : ValueType<64>; // X86 MMX value +def Glue : ValueType<0>; // Pre-RA sched glue +def isVoid : ValueType<0>; // Produces no value +def untyped : ValueType<8, "Untyped">; // Produces an untyped value +def funcref : ValueType<0>; // WebAssembly's funcref type +def externref : ValueType<0>; // WebAssembly's externref type +def exnref : ValueType<0>; // WebAssembly's exnref type +def x86amx : ValueType<8192>; // X86 AMX value +def i64x8 : ValueType<512>; // 8 Consecutive GPRs (AArch64) def aarch64svcount - : ValueType<16, 251>; // AArch64 predicate-as-counter -def spirvbuiltin : ValueType<0, 252>; // SPIR-V's builtin type + : ValueType<16>; // AArch64 predicate-as-counter +def spirvbuiltin : ValueType<0>; // SPIR-V's builtin type // AMDGPU buffer fat pointer, buffer rsrc + offset, rewritten before MIR translation. // FIXME: Remove this and the getPointerType() override if MVT::i160 is added. -def amdgpuBufferFatPointer : ValueType<160, 253>; +def amdgpuBufferFatPointer : ValueType<160>; // AMDGPU buffer strided pointer, buffer rsrc + index + offset, doesn't reach MIR. // FIXME: Remove this and the getPointerType() override if MVT::i82 is added. -def amdgpuBufferStridedPointer : ValueType<192, 254>; +def amdgpuBufferStridedPointer : ValueType<192>; -def aarch64mfp8 : ValueType<8, 255>; // 8-bit value in FPR (AArch64) +def aarch64mfp8 : ValueType<8>; // 8-bit value in FPR (AArch64) // CHERI capabilities. Pointer-like values that carry additional metadata // for enforcing safety guarantees on CHERI-enabled targets. 
-def c64 : VTCheriCapability<64, 256>; // 64-bit CHERI capability value -def c128 : VTCheriCapability<128, 257>; // 128-bit CHERI capability value +def c64 : VTCheriCapability<64>; // 64-bit CHERI capability value +def c128 : VTCheriCapability<128>; // 128-bit CHERI capability value let isNormalValueType = false in { // Pseudo valuetype mapped to the current CHERI capability pointer size. // Should only be used in TableGen. -def cPTR : VTAny<503>; +def cPTR : VTAny; -def token : ValueType<0, 504>; // TokenTy -def MetadataVT : ValueType<0, 505> { // Metadata - let LLVMName = "Metadata"; -} +def token : ValueType<0>; // TokenTy +def MetadataVT : ValueType<0, "Metadata">; // Metadata // Pseudo valuetype to represent "pointer to any address space" // Should only be used in TableGen. -def pAny : VTAny<506>; +def pAny : VTAny; // Pseudo valuetype to represent "vector of any size" // Should only be used in TableGen. -def vAny : VTAny<507>; +def vAny : VTAny; // Pseudo valuetype to represent "float of any format" // Should only be used in TableGen. -def fAny : VTAny<508>; +def fAny : VTAny; // Pseudo valuetype to represent "integer of any bit width" // Should only be used in TableGen. -def iAny : VTAny<509>; +def iAny : VTAny; // Pseudo valuetype mapped to the current pointer size. // Should only be used in TableGen. -def iPTR : ValueType<0, 510>; +def iPTR : ValueType<0>; // Pseudo valuetype to represent "any type of any size". // Should only be used in TableGen. -def Any : VTAny<511>; +def Any : VTAny; } // isNormalValueType = false @@ -414,6 +407,6 @@ def Any : VTAny<511>; /// e.g. 
def p0 : PtrValueType ; class PtrValueType : - ValueType { + ValueType { int AddrSpace = addrspace; } diff --git a/llvm/include/llvm/CodeGenTypes/MachineValueType.h b/llvm/include/llvm/CodeGenTypes/MachineValueType.h index 69d52e33d900f..08a9c85a213e0 100644 --- a/llvm/include/llvm/CodeGenTypes/MachineValueType.h +++ b/llvm/include/llvm/CodeGenTypes/MachineValueType.h @@ -40,8 +40,7 @@ namespace llvm { // are considered extended value types. INVALID_SIMPLE_VALUE_TYPE = 0, -#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ - Ty = n, +#define GET_VT_ATTR(Ty, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) Ty, #define GET_VT_RANGES #include "llvm/CodeGen/GenVT.inc" #undef GET_VT_ATTR @@ -187,7 +186,7 @@ namespace llvm { /// Return true if this is an overloaded type for TableGen. bool isOverloaded() const { switch (SimpleTy) { -#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ case Ty: \ return Any; #include "llvm/CodeGen/GenVT.inc" @@ -270,7 +269,7 @@ namespace llvm { MVT getVectorElementType() const { assert(SimpleTy >= FIRST_VALUETYPE && SimpleTy <= LAST_VALUETYPE); static constexpr SimpleValueType EltTyTable[] = { -#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ EltTy, #include "llvm/CodeGen/GenVT.inc" #undef GET_VT_ATTR @@ -284,7 +283,7 @@ namespace llvm { unsigned getVectorMinNumElements() const { assert(SimpleTy >= FIRST_VALUETYPE && SimpleTy <= LAST_VALUETYPE); static constexpr uint16_t NElemTable[] = { -#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ NElem, #include "llvm/CodeGen/GenVT.inc" #undef GET_VT_ATTR @@ -314,7 +313,7 @@ namespace llvm { /// base size. 
TypeSize getSizeInBits() const { static constexpr TypeSize SizeTable[] = { -#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ TypeSize(Sz, Sc || Tup || Ty == aarch64svcount /* FIXME: Not in the td. \ */), #include "llvm/CodeGen/GenVT.inc" @@ -437,7 +436,7 @@ namespace llvm { } static MVT getFloatingPointVT(unsigned BitWidth) { -#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ if (FP == 3 && sz == BitWidth) \ return Ty; #include "llvm/CodeGen/GenVT.inc" @@ -447,7 +446,7 @@ namespace llvm { } static MVT getIntegerVT(unsigned BitWidth) { -#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ +#define GET_VT_ATTR(Ty, sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ if (Int == 3 && sz == BitWidth) \ return Ty; #include "llvm/CodeGen/GenVT.inc" @@ -477,7 +476,7 @@ namespace llvm { } static MVT getRISCVVectorTupleVT(unsigned Sz, unsigned NFields) { -#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc, Tup, NF, nElem, EltTy) \ +#define GET_VT_ATTR(Ty, sz, Any, Int, FP, Vec, Sc, Tup, NF, nElem, EltTy) \ if (Tup && sz == Sz && NF == NFields) \ return Ty; #include "llvm/CodeGen/GenVT.inc" @@ -491,8 +490,7 @@ namespace llvm { assert(isRISCVVectorTuple() && SimpleTy >= FIRST_VALUETYPE && SimpleTy <= LAST_VALUETYPE); static constexpr uint8_t NFTable[] = { -#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) \ - NF, +#define GET_VT_ATTR(Ty, Sz, Any, Int, FP, Vec, Sc, Tup, NF, NElem, EltTy) NF, #include "llvm/CodeGen/GenVT.inc" #undef GET_VT_ATTR }; diff --git a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h index a7ba79164c471..93412d9d22f8c 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h +++ 
b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h @@ -338,9 +338,9 @@ template class WaitingOnGraph { // incorporate NewSNs. std::vector> ReadyNodes, FailedNodes; processReadyOrFailed(ModifiedPendingSNs, ReadyNodes, FailedNodes, - SuperNodeDeps, ElemToPendingSN, FailedSNs); + SuperNodeDeps, FailedSNs, &ElemToPendingSN); processReadyOrFailed(NewSNs, ReadyNodes, FailedNodes, SuperNodeDeps, - ElemToNewSN, FailedSNs); + FailedSNs, nullptr); CoalesceToPendingSNs.coalesce(ModifiedPendingSNs, ElemToPendingSN); CoalesceToPendingSNs.coalesce(NewSNs, ElemToPendingSN); @@ -591,8 +591,11 @@ template class WaitingOnGraph { std::vector> &Ready, std::vector> &Failed, SuperNodeDepsMap &SuperNodeDeps, - ElemToSuperNodeMap &ElemToSNs, - std::vector FailedSNs) { + const std::vector &FailedSNs, + ElemToSuperNodeMap *ElemToSNs) { + + SmallVector ToRemoveFromElemToSNs; + for (size_t I = 0; I != SNs.size();) { auto &SN = SNs[I]; @@ -609,6 +612,8 @@ template class WaitingOnGraph { bool SNReady = SN->Deps.empty(); if (SNReady || SNFailed) { + if (ElemToSNs) + ToRemoveFromElemToSNs.push_back(SN.get()); auto &NodeList = SNFailed ? Failed : Ready; NodeList.push_back(std::move(SN)); std::swap(SN, SNs.back()); @@ -616,6 +621,15 @@ template class WaitingOnGraph { } else ++I; } + + // Update ElemToSNs (if passed) to remove elements pointing at SN. 
+ for (auto *SN : ToRemoveFromElemToSNs) { + for (auto &[Container, Elems] : SN->defs()) { + auto &Row = (*ElemToSNs)[Container]; + for (auto &Elem : Elems) + Row.erase(Elem); + } + } } std::vector> PendingSNs; diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index ade00e7ca27d5..da70048d28c12 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -490,7 +490,8 @@ def OMP_SCHEDULE_Dynamic : EnumVal<"dynamic", 3, 1> {} def OMP_SCHEDULE_Guided : EnumVal<"guided", 4, 1> {} def OMP_SCHEDULE_Auto : EnumVal<"auto", 5, 1> {} def OMP_SCHEDULE_Runtime : EnumVal<"runtime", 6, 1> {} -def OMP_SCHEDULE_Default : EnumVal<"default", 7, 0> { let isDefault = 1; } +def OMP_SCHEDULE_Distribute : EnumVal<"distribute", 7, 1> {} +def OMP_SCHEDULE_Default : EnumVal<"default", 8, 0> { let isDefault = 1; } def OMPC_Schedule : Clause<[Spelling<"schedule">]> { let clangClass = "OMPScheduleClause"; let flangClass = "OmpScheduleClause"; @@ -501,6 +502,7 @@ def OMPC_Schedule : Clause<[Spelling<"schedule">]> { OMP_SCHEDULE_Guided, OMP_SCHEDULE_Auto, OMP_SCHEDULE_Runtime, + OMP_SCHEDULE_Distribute, OMP_SCHEDULE_Default ]; } diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index f864a895a1259..b801e212ceced 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -1116,11 +1116,17 @@ class OpenMPIRBuilder { /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param LoopType Type of workshare loop. + /// \param HasDistSchedule Defines if the clause being lowered is + /// dist_schedule as this is handled slightly differently + /// \param DistScheduleSchedType Defines the Schedule Type for the Distribute + /// loop. Defaults to None if no Distribute loop is present. /// /// \returns Point where to insert code after the workshare construct. 
InsertPointOrErrorTy applyStaticWorkshareLoop( DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, - omp::WorksharingLoopType LoopType, bool NeedsBarrier); + omp::WorksharingLoopType LoopType, bool NeedsBarrier, + bool HasDistSchedule = false, + omp::OMPScheduleType DistScheduleSchedType = omp::OMPScheduleType::None); /// Modifies the canonical loop a statically-scheduled workshare loop with a /// user-specified chunk size. @@ -1133,13 +1139,22 @@ class OpenMPIRBuilder { /// \param NeedsBarrier Indicates whether a barrier must be inserted after the /// loop. /// \param ChunkSize The user-specified chunk size. + /// \param SchedType Optional type of scheduling to be passed to the init + /// function. + /// \param DistScheduleChunkSize The size of dist_shcedule chunk considered + /// as a unit when + /// scheduling. If \p nullptr, defaults to 1. + /// \param DistScheduleSchedType Defines the Schedule Type for the Distribute + /// loop. Defaults to None if no Distribute loop is present. /// /// \returns Point where to insert code after the workshare construct. - InsertPointOrErrorTy applyStaticChunkedWorkshareLoop(DebugLoc DL, - CanonicalLoopInfo *CLI, - InsertPointTy AllocaIP, - bool NeedsBarrier, - Value *ChunkSize); + InsertPointOrErrorTy applyStaticChunkedWorkshareLoop( + DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, + bool NeedsBarrier, Value *ChunkSize, + omp::OMPScheduleType SchedType = + omp::OMPScheduleType::UnorderedStaticChunked, + Value *DistScheduleChunkSize = nullptr, + omp::OMPScheduleType DistScheduleSchedType = omp::OMPScheduleType::None); /// Modifies the canonical loop to be a dynamically-scheduled workshare loop. /// @@ -1218,6 +1233,10 @@ class OpenMPIRBuilder { /// \param LoopType Information about type of loop worksharing. /// It corresponds to type of loop workshare OpenMP pragma. /// \param NoLoop If true, no-loop code is generated. 
+ /// \param HasDistSchedule Defines if the clause being lowered is + /// dist_schedule as this is handled slightly differently + /// + /// \param DistScheduleChunkSize The chunk size for dist_schedule loop /// /// \returns Point where to insert code after the workshare construct. LLVM_ABI InsertPointOrErrorTy applyWorkshareLoop( @@ -1229,7 +1248,8 @@ class OpenMPIRBuilder { bool HasOrderedClause = false, omp::WorksharingLoopType LoopType = omp::WorksharingLoopType::ForStaticLoop, - bool NoLoop = false); + bool NoLoop = false, bool HasDistSchedule = false, + Value *DistScheduleChunkSize = nullptr); /// Tile a loop nest. /// @@ -1446,6 +1466,9 @@ class OpenMPIRBuilder { using ReductionGenAtomicCBTy = std::function; + using ReductionGenDataPtrPtrCBTy = std::function; + /// Enum class for reduction evaluation types scalar, complex and aggregate. enum class EvalKind { Scalar, Complex, Aggregate }; @@ -1454,17 +1477,25 @@ class OpenMPIRBuilder { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, - ReductionGenAtomicCBTy AtomicReductionGen) + ReductionGenAtomicCBTy AtomicReductionGen, + ReductionGenDataPtrPtrCBTy DataPtrPtrGen, + Type *ByRefAllocatedType = nullptr, + Type *ByRefElementType = nullptr) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), EvaluationKind(EvaluationKind), ReductionGen(ReductionGen), ReductionGenClang(ReductionGenClang), - AtomicReductionGen(AtomicReductionGen) {} + AtomicReductionGen(AtomicReductionGen), DataPtrPtrGen(DataPtrPtrGen), + ByRefAllocatedType(ByRefAllocatedType), + ByRefElementType(ByRefElementType) {} + ReductionInfo(Value *PrivateVariable) : ElementType(nullptr), Variable(nullptr), PrivateVariable(PrivateVariable), EvaluationKind(EvalKind::Scalar), - ReductionGen(), ReductionGenClang(), AtomicReductionGen() {} + ReductionGen(), ReductionGenClang(), AtomicReductionGen(), + 
DataPtrPtrGen() {} - /// Reduction element type, must match pointee type of variable. + /// Reduction element type, must match pointee type of variable. For by-ref + /// reductions, this would be just an opaque `ptr`. Type *ElementType; /// Reduction variable of pointer type. @@ -1491,6 +1522,21 @@ class OpenMPIRBuilder { /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. ReductionGenAtomicCBTy AtomicReductionGen; + + ReductionGenDataPtrPtrCBTy DataPtrPtrGen; + + /// For by-ref reductions, we need to keep track of 2 extra types that are + /// potentially different: + /// * The allocated type is the type of the storage allocated by the + /// reduction op's `alloc` region. For example, for allocatables and arrays, + /// this type would be the descriptor/box struct. + Type *ByRefAllocatedType; + + /// * The by-ref element type is the type of the actual storage needed for + /// the data of the allocatable or array. For example, a float allocatable + /// would need some float storage to store intermediate reduction + /// results. + Type *ByRefElementType; }; enum class CopyAction : unsigned { @@ -1535,14 +1581,15 @@ class OpenMPIRBuilder { /// Function to shuffle over the value from the remote lane. void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr, - Type *ElementType, Value *Offset, - Type *ReductionArrayTy); + Type *ElementType, Value *Offset, Type *ReductionArrayTy, + bool IsByRefElem); /// Emit instructions to copy a Reduce list, which contains partially /// aggregated values, in the specified direction.
- void emitReductionListCopy( + Error emitReductionListCopy( InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy, ArrayRef ReductionInfos, Value *SrcBase, Value *DestBase, + ArrayRef IsByRef, CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}); /// Emit a helper that reduces data across two OpenMP threads (lanes) @@ -1616,11 +1663,13 @@ class OpenMPIRBuilder { /// \param ReduceFn The reduction function. /// \param FuncAttrs Optional param to specify any function attributes that /// need to be copied to the new function. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref + /// or not. /// /// \return The ShuffleAndReduce function. - Function *emitShuffleAndReduceFunction( + Expected emitShuffleAndReduceFunction( ArrayRef ReductionInfos, - Function *ReduceFn, AttributeList FuncAttrs); + Function *ReduceFn, AttributeList FuncAttrs, ArrayRef IsByRef); /// Helper function for CreateCanonicalScanLoops to create InputLoop /// in the firstGen and Scan Loop in the SecondGen @@ -1680,12 +1729,14 @@ class OpenMPIRBuilder { /// \param ReductionInfos Array type containing the ReductionOps. /// \param FuncAttrs Optional param to specify any function attributes that /// need to be copied to the new function. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref + /// or not. /// /// \return The InterWarpCopy function. Expected emitInterWarpCopyFunction(const LocationDescription &Loc, ArrayRef ReductionInfos, - AttributeList FuncAttrs); + AttributeList FuncAttrs, ArrayRef IsByRef); /// This function emits a helper that copies all the reduction variables from /// the team into the provided global buffer for the reduction variables. @@ -1779,6 +1830,7 @@ class OpenMPIRBuilder { /// \return The reduction function. 
Expected createReductionFunction( StringRef ReducerName, ArrayRef ReductionInfos, + ArrayRef IsByRef, ReductionGenCBKind ReductionGenCBKind = ReductionGenCBKind::MLIR, AttributeList FuncAttrs = {}); @@ -2031,11 +2083,13 @@ class OpenMPIRBuilder { /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. - /// \param CodeGenIP An insertion point suitable for code - /// generation. \param ReductionInfos A list of info on each reduction - /// variable. \param IsNoWait Optional flag set if the reduction is - /// marked as - /// nowait. + /// \param CodeGenIP An insertion point suitable for code + /// generation. + /// \param ReductionInfos A list of info on each reduction + /// variable. + /// \param IsNoWait Optional flag set if the reduction is + /// marked as nowait. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref. /// \param IsTeamsReduction Optional flag set if it is a teams /// reduction. /// \param GridValue Optional GPU grid value. @@ -2045,7 +2099,8 @@ class OpenMPIRBuilder { LLVM_ABI InsertPointOrErrorTy createReductionsGPU( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef ReductionInfos, - bool IsNoWait = false, bool IsTeamsReduction = false, + ArrayRef IsByRef, bool IsNoWait = false, + bool IsTeamsReduction = false, ReductionGenCBKind ReductionGenCBKind = ReductionGenCBKind::MLIR, std::optional GridValue = {}, unsigned ReductionBufNum = 1024, Value *SrcLocInfo = nullptr); diff --git a/llvm/include/llvm/IR/Argument.h b/llvm/include/llvm/IR/Argument.h index b9a73b3eb5fc2..6ffc0f8fd5155 100644 --- a/llvm/include/llvm/IR/Argument.h +++ b/llvm/include/llvm/IR/Argument.h @@ -108,12 +108,6 @@ class Argument final : public Value { /// returned. Otherwise, nullptr. LLVM_ABI Type *getPointeeInMemoryValueType() const; - /// If this is a byval or inalloca argument, return its alignment. 
- /// FIXME: Remove this function once transition to Align is over. - /// Use getParamAlign() instead. - LLVM_ABI LLVM_DEPRECATED("Use getParamAlign() instead", - "getParamAlign") uint64_t getParamAlignment() const; - /// If this is a byval or inalloca argument, return its alignment. LLVM_ABI MaybeAlign getParamAlign() const; diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h index e06e6adbc3130..e3f2eb9fa44b8 100644 --- a/llvm/include/llvm/IR/Constants.h +++ b/llvm/include/llvm/IR/Constants.h @@ -1033,10 +1033,10 @@ class ConstantPtrAuth final : public Constant { friend struct ConstantPtrAuthKeyType; friend class Constant; - constexpr static IntrusiveOperandsAllocMarker AllocMarker{4}; + constexpr static IntrusiveOperandsAllocMarker AllocMarker{5}; ConstantPtrAuth(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, - Constant *AddrDisc); + Constant *AddrDisc, Constant *DeactivationSymbol); void *operator new(size_t s) { return User::operator new(s, AllocMarker); } @@ -1046,7 +1046,8 @@ class ConstantPtrAuth final : public Constant { public: /// Return a pointer signed with the specified parameters. LLVM_ABI static ConstantPtrAuth *get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc); + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol); /// Produce a new ptrauth expression signing the given value using /// the same schema as is stored in one. @@ -1078,6 +1079,10 @@ class ConstantPtrAuth final : public Constant { return !getAddrDiscriminator()->isNullValue(); } + Constant *getDeactivationSymbol() const { + return cast(Op<4>().get()); + } + /// A constant value for the address discriminator which has special /// significance to ctors/dtors lowering. 
Regular address discrimination can't /// be applied for them since uses of llvm.global_{c|d}tors are disallowed @@ -1106,7 +1111,7 @@ class ConstantPtrAuth final : public Constant { template <> struct OperandTraits - : public FixedNumOperandTraits {}; + : public FixedNumOperandTraits {}; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPtrAuth, Constant) diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index 77fdb8295faa8..1c86c6815f049 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -2019,7 +2019,6 @@ def int_aarch64_sve_ext : AdvSIMD_2VectorArgIndexed_Intrinsic<[IntrSpecula def int_aarch64_sve_sel : AdvSIMD_Pred2VectorArg_Intrinsic<[IntrSpeculatable]>; def int_aarch64_sve_lasta : AdvSIMD_SVE_Reduce_Intrinsic<[IntrSpeculatable]>; def int_aarch64_sve_lastb : AdvSIMD_SVE_Reduce_Intrinsic<[IntrSpeculatable]>; -def int_aarch64_sve_rev : AdvSIMD_1VectorArg_Intrinsic<[IntrSpeculatable]>; def int_aarch64_sve_rev_b16 : AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>; def int_aarch64_sve_rev_b32 : AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>; def int_aarch64_sve_rev_b64 : AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>; diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td index 9f6a9964903ae..465665c838bae 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td @@ -90,4 +90,8 @@ let TargetPrefix = "riscv" in { def int_riscv_cv_mac_machhuRN : ScalarCoreVMacGprGprGprImmIntrinsic; def int_riscv_cv_mac_macsRN : ScalarCoreVMacGprGprGprImmIntrinsic; def int_riscv_cv_mac_machhsRN : ScalarCoreVMacGprGprGprImmIntrinsic; + + def int_riscv_cv_elw_elw + : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], + [IntrReadMem, IntrArgMemOnly, IntrHasSideEffects]>; } // TargetPrefix = "riscv" diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h index 
5972dcb637dfa..d938f4609742b 100644 --- a/llvm/include/llvm/IR/LLVMContext.h +++ b/llvm/include/llvm/IR/LLVMContext.h @@ -98,7 +98,8 @@ class LLVMContext { OB_kcfi = 8, // "kcfi" OB_convergencectrl = 9, // "convergencectrl" OB_align = 10, // "align" - OB_LastBundleID = OB_align // Marker for last bundle ID + OB_deactivation_symbol = 11, // "deactivation-symbol" + OB_LastBundleID = OB_deactivation_symbol }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.h b/llvm/include/llvm/IR/RuntimeLibcalls.h index cf96547063cd0..41fe448382992 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.h +++ b/llvm/include/llvm/IR/RuntimeLibcalls.h @@ -211,6 +211,16 @@ struct RuntimeLibcallsInfo { return true; } + static bool darwinHasMemsetPattern(const Triple &TT) { + // memset_pattern{4,8,16} is only available on iOS 3.0 and Mac OS X 10.5 and + // later. All versions of watchOS support it. + if (TT.isMacOSX()) + return !TT.isMacOSXVersionLT(10, 5); + if (TT.isiOS()) + return !TT.isOSVersionLT(3, 0); + return TT.isWatchOS(); + } + static bool hasAEABILibcalls(const Triple &TT) { return TT.isTargetAEABI() || TT.isTargetGNUAEABI() || TT.isTargetMuslAEABI() || TT.isOSFuchsia() || TT.isAndroid(); diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td index 11e6127e0741d..09e33d7f89e8a 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.td +++ b/llvm/include/llvm/IR/RuntimeLibcalls.td @@ -24,6 +24,7 @@ def isNotOSWindows : RuntimeLibcallPredicate<"!TT.isOSWindows()">; def isNotOSLinux : RuntimeLibcallPredicate<[{!TT.isOSLinux()}]>; def isNotOSMSVCRT : RuntimeLibcallPredicate<"!TT.isOSMSVCRT()">; def isPS : RuntimeLibcallPredicate<"TT.isPS()">; +def isMacOSX : RuntimeLibcallPredicate<[{TT.isMacOSX()}]>; def isNotOSWindowsOrIsCygwinMinGW : RuntimeLibcallPredicate<"!TT.isOSWindows() || TT.isOSCygMing()">; def isWindowsMSVCEnvironment : RuntimeLibcallPredicate< @@ -50,6 +51,7 @@ 
def isWindowsMSVCOrItaniumEnvironment : RuntimeLibcallPredicate< def isGNUEnvironment : RuntimeLibcallPredicate<"TT.isGNUEnvironment()">; def darwinHasSinCosStret : RuntimeLibcallPredicate<"darwinHasSinCosStret(TT)">; def darwinHasExp10 : RuntimeLibcallPredicate<"darwinHasExp10(TT)">; +def darwinHasMemsetPattern : RuntimeLibcallPredicate<[{darwinHasMemsetPattern(TT)}]>; def hasExp10 : RuntimeLibcallPredicate<[{!TT.isOSDarwin()}]>; @@ -154,6 +156,8 @@ foreach FPTy = ["F32", "F64", "F80", "F128", "PPCF128"] in { def SINCOS_#FPTy : RuntimeLibcall; def REMQUO_#FPTy : RuntimeLibcall; def FDIM_#FPTy : RuntimeLibcall; + + def CABS_#FPTy : RuntimeLibcall; } foreach FPTy = [ "F32", "F64" ] in { @@ -382,7 +386,9 @@ def MEMMOVE : RuntimeLibcall; def MEMMOVE_CHK : RuntimeLibcall; def MEMSET : RuntimeLibcall; def MEMSET_CHK : RuntimeLibcall; +def MALLOC : RuntimeLibcall; def CALLOC : RuntimeLibcall; +def FREE : RuntimeLibcall; def BZERO : RuntimeLibcall; def STRLEN : RuntimeLibcall; @@ -569,6 +575,302 @@ def OBJC_RETAIN_AUTORELEASE : RuntimeLibcall; def OBJC_SYNC_ENTER : RuntimeLibcall; def OBJC_SYNC_EXIT : RuntimeLibcall; +def ABORT : RuntimeLibcall; +def ABS : RuntimeLibcall; +def ACCESS : RuntimeLibcall; +def ALIGNED_ALLOC : RuntimeLibcall; +def ATEXIT : RuntimeLibcall; +def ATOF : RuntimeLibcall; +def ATOI : RuntimeLibcall; +def ATOL : RuntimeLibcall; +def ATOLL : RuntimeLibcall; +def BCMP : RuntimeLibcall; +def BCOPY : RuntimeLibcall; +def CHMOD : RuntimeLibcall; +def CHOWN : RuntimeLibcall; +def CLEARERR : RuntimeLibcall; +def CLOSEDIR : RuntimeLibcall; +def CTERMID : RuntimeLibcall; +def CXA_ATEXIT : RuntimeLibcall; +def CXA_GUARD_ABORT : RuntimeLibcall; +def CXA_GUARD_ACQUIRE : RuntimeLibcall; +def CXA_GUARD_RELEASE : RuntimeLibcall; +def CXA_THROW : RuntimeLibcall; +def DUNDER_ISOC99_SCANF : RuntimeLibcall; +def DUNDER_ISOC99_SSCANF : RuntimeLibcall; +def DUNDER_STRDUP : RuntimeLibcall; +def DUNDER_STRNDUP : RuntimeLibcall; +def DUNDER_STRTOK_R : RuntimeLibcall; +def 
ENUM_VARIANT : RuntimeLibcall; +def EXECL : RuntimeLibcall; +def EXECLE : RuntimeLibcall; +def EXECLP : RuntimeLibcall; +def EXECV : RuntimeLibcall; +def EXECVE : RuntimeLibcall; +def EXECVP : RuntimeLibcall; +def EXECVPE : RuntimeLibcall; +def EXIT : RuntimeLibcall; +def FCLOSE : RuntimeLibcall; +def FDOPEN : RuntimeLibcall; +def FEOF : RuntimeLibcall; +def FERROR : RuntimeLibcall; +def FFLUSH : RuntimeLibcall; +def FFS : RuntimeLibcall; +def FFSL : RuntimeLibcall; +def FFSLL : RuntimeLibcall; +def FGETC : RuntimeLibcall; +def FGETC_UNLOCKED : RuntimeLibcall; +def FGETPOS : RuntimeLibcall; +def FGETS : RuntimeLibcall; +def FGETS_UNLOCKED : RuntimeLibcall; +def FILENO : RuntimeLibcall; +def FIPRINTF : RuntimeLibcall; +def FLOCKFILE : RuntimeLibcall; +def FLS : RuntimeLibcall; +def FLSL : RuntimeLibcall; +def FLSLL : RuntimeLibcall; +def FOPEN : RuntimeLibcall; +def FOPEN64 : RuntimeLibcall; +def FORK : RuntimeLibcall; +def FPRINTF : RuntimeLibcall; +def FPUTC : RuntimeLibcall; +def FPUTC_UNLOCKED : RuntimeLibcall; +def FPUTS : RuntimeLibcall; +def FPUTS_UNLOCKED : RuntimeLibcall; +def FREAD : RuntimeLibcall; +def FREAD_UNLOCKED : RuntimeLibcall; +def FSCANF : RuntimeLibcall; +def FSEEK : RuntimeLibcall; +def FSEEKO : RuntimeLibcall; +def FSEEKO64 : RuntimeLibcall; +def FSETPOS : RuntimeLibcall; +def FSTAT : RuntimeLibcall; +def FSTAT64 : RuntimeLibcall; +def FSTATVFS : RuntimeLibcall; +def FSTATVFS64 : RuntimeLibcall; +def FTELL : RuntimeLibcall; +def FTELLO : RuntimeLibcall; +def FTELLO64 : RuntimeLibcall; +def FTRYLOCKFILE : RuntimeLibcall; +def FUNLOCKFILE : RuntimeLibcall; +def FWRITE : RuntimeLibcall; +def FWRITE_UNLOCKED : RuntimeLibcall; +def GETC : RuntimeLibcall; +def GETCHAR : RuntimeLibcall; +def GETCHAR_UNLOCKED : RuntimeLibcall; +def GETC_UNLOCKED : RuntimeLibcall; +def GETENV : RuntimeLibcall; +def GETITIMER : RuntimeLibcall; +def GETLOGIN_R : RuntimeLibcall; +def GETPWNAM : RuntimeLibcall; +def GETS : RuntimeLibcall; +def GETTIMEOFDAY : 
RuntimeLibcall; +def HTONL : RuntimeLibcall; +def HTONS : RuntimeLibcall; +def IPRINTF : RuntimeLibcall; +def ISASCII : RuntimeLibcall; +def ISDIGIT : RuntimeLibcall; +def LABS : RuntimeLibcall; +def LCHOWN : RuntimeLibcall; +def LLABS : RuntimeLibcall; +def LSTAT : RuntimeLibcall; +def LSTAT64 : RuntimeLibcall; +def MEMALIGN : RuntimeLibcall; +def MEMCCPY : RuntimeLibcall; +def MEMCCPY_CHK : RuntimeLibcall; +def MEMCHR : RuntimeLibcall; +def MEMPCPY : RuntimeLibcall; +def MEMPCPY_CHK : RuntimeLibcall; +def MEMRCHR : RuntimeLibcall; +def MEMSET_PATTERN16 : RuntimeLibcall; +def MEMSET_PATTERN4 : RuntimeLibcall; +def MEMSET_PATTERN8 : RuntimeLibcall; +def MKDIR : RuntimeLibcall; +def MKTIME : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32 : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32_INT : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64 : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64_LONGLONG : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_PTR32 : RuntimeLibcall; +def MSVC_DELETE_PTR32_INT : RuntimeLibcall; +def MSVC_DELETE_PTR32_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_PTR64 : RuntimeLibcall; +def MSVC_DELETE_PTR64_LONGLONG : RuntimeLibcall; +def MSVC_DELETE_PTR64_NOTHROW : RuntimeLibcall; +def MSVC_NEW_ARRAY_INT : RuntimeLibcall; +def MSVC_NEW_ARRAY_INT_NOTHROW : RuntimeLibcall; +def MSVC_NEW_ARRAY_LONGLONG : RuntimeLibcall; +def MSVC_NEW_ARRAY_LONGLONG_NOTHROW : RuntimeLibcall; +def MSVC_NEW_INT : RuntimeLibcall; +def MSVC_NEW_INT_NOTHROW : RuntimeLibcall; +def MSVC_NEW_LONGLONG : RuntimeLibcall; +def MSVC_NEW_LONGLONG_NOTHROW : RuntimeLibcall; +def NTOHL : RuntimeLibcall; +def NTOHS : RuntimeLibcall; +def OPEN : RuntimeLibcall; +def OPEN64 : RuntimeLibcall; +def OPENDIR : RuntimeLibcall; +def PCLOSE : RuntimeLibcall; +def PERROR : RuntimeLibcall; +def POPEN : RuntimeLibcall; +def POSIX_MEMALIGN : RuntimeLibcall; +def PREAD : RuntimeLibcall; +def PRINTF : RuntimeLibcall; 
+def PUTC : RuntimeLibcall; +def PUTCHAR : RuntimeLibcall; +def PUTCHAR_UNLOCKED : RuntimeLibcall; +def PUTC_UNLOCKED : RuntimeLibcall; +def PUTS : RuntimeLibcall; +def PVALLOC : RuntimeLibcall; +def PWRITE : RuntimeLibcall; +def QSORT : RuntimeLibcall; +def READ : RuntimeLibcall; +def READLINK : RuntimeLibcall; +def REALLOC : RuntimeLibcall; +def REALLOCARRAY : RuntimeLibcall; +def REALLOCF : RuntimeLibcall; +def REALPATH : RuntimeLibcall; +def REMOVE : RuntimeLibcall; +def RENAME : RuntimeLibcall; +def REWIND : RuntimeLibcall; +def RMDIR : RuntimeLibcall; +def SCANF : RuntimeLibcall; +def SETBUF : RuntimeLibcall; +def SETITIMER : RuntimeLibcall; +def SETVBUF : RuntimeLibcall; +def SIPRINTF : RuntimeLibcall; +def SIZE_RETURNING_NEW : RuntimeLibcall; +def SIZE_RETURNING_NEW_ALIGNED : RuntimeLibcall; +def SIZE_RETURNING_NEW_ALIGNED_HOT_COLD : RuntimeLibcall; +def SIZE_RETURNING_NEW_HOT_COLD : RuntimeLibcall; +def SMALL_FPRINTF : RuntimeLibcall; +def SMALL_PRINTF : RuntimeLibcall; +def SMALL_SPRINTF : RuntimeLibcall; +def SNPRINTF : RuntimeLibcall; +def SNPRINTF_CHK : RuntimeLibcall; +def SPRINTF : RuntimeLibcall; +def SPRINTF_CHK : RuntimeLibcall; +def SSCANF : RuntimeLibcall; +def STAT : RuntimeLibcall; +def STAT64 : RuntimeLibcall; +def STATVFS : RuntimeLibcall; +def STATVFS64 : RuntimeLibcall; +def STPCPY : RuntimeLibcall; +def STPCPY_CHK : RuntimeLibcall; +def STPNCPY : RuntimeLibcall; +def STPNCPY_CHK : RuntimeLibcall; +def STRCASECMP : RuntimeLibcall; +def STRCAT : RuntimeLibcall; +def STRCAT_CHK : RuntimeLibcall; +def STRCHR : RuntimeLibcall; +def STRCMP : RuntimeLibcall; +def STRCOLL : RuntimeLibcall; +def STRCPY : RuntimeLibcall; +def STRCPY_CHK : RuntimeLibcall; +def STRCSPN : RuntimeLibcall; +def STRDUP : RuntimeLibcall; +def STRLCAT : RuntimeLibcall; +def STRLCAT_CHK : RuntimeLibcall; +def STRLCPY : RuntimeLibcall; +def STRLCPY_CHK : RuntimeLibcall; +def STRLEN_CHK : RuntimeLibcall; +def STRNCASECMP : RuntimeLibcall; +def STRNCAT : RuntimeLibcall; +def 
STRNCAT_CHK : RuntimeLibcall; +def STRNCMP : RuntimeLibcall; +def STRNCPY : RuntimeLibcall; +def STRNCPY_CHK : RuntimeLibcall; +def STRNDUP : RuntimeLibcall; +def STRNLEN : RuntimeLibcall; +def STRPBRK : RuntimeLibcall; +def STRRCHR : RuntimeLibcall; +def STRSPN : RuntimeLibcall; +def STRSTR : RuntimeLibcall; +def STRTOD : RuntimeLibcall; +def STRTOF : RuntimeLibcall; +def STRTOK : RuntimeLibcall; +def STRTOK_R : RuntimeLibcall; +def STRTOL : RuntimeLibcall; +def STRTOLD : RuntimeLibcall; +def STRTOLL : RuntimeLibcall; +def STRTOUL : RuntimeLibcall; +def STRTOULL : RuntimeLibcall; +def STRXFRM : RuntimeLibcall; +def SYSTEM : RuntimeLibcall; +def TERMINATE : RuntimeLibcall; +def TIMES : RuntimeLibcall; +def TMPFILE : RuntimeLibcall; +def TMPFILE64 : RuntimeLibcall; +def TOASCII : RuntimeLibcall; +def UNAME : RuntimeLibcall; +def UNDER_IO_GETC : RuntimeLibcall; +def UNDER_IO_PUTC : RuntimeLibcall; +def UNGETC : RuntimeLibcall; +def UNLINK : RuntimeLibcall; +def UNSETENV : RuntimeLibcall; +def UTIME : RuntimeLibcall; +def UTIMES : RuntimeLibcall; +def VALLOC : RuntimeLibcall; +def VEC_CALLOC : RuntimeLibcall; +def VEC_FREE : RuntimeLibcall; +def VEC_MALLOC : RuntimeLibcall; +def VEC_REALLOC : RuntimeLibcall; +def VFPRINTF : RuntimeLibcall; +def VFSCANF : RuntimeLibcall; +def VPRINTF : RuntimeLibcall; +def VSCANF : RuntimeLibcall; +def VSNPRINTF : RuntimeLibcall; +def VSNPRINTF_CHK : RuntimeLibcall; +def VSPRINTF : RuntimeLibcall; +def VSPRINTF_CHK : RuntimeLibcall; +def VSSCANF : RuntimeLibcall; +def WCSLEN : RuntimeLibcall; +def WRITE : RuntimeLibcall; +def ZDAPV : RuntimeLibcall; +def ZDAPVJ : RuntimeLibcall; +def ZDAPVJST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVM : RuntimeLibcall; +def ZDAPVMST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVRKST9NOTHROW_T : RuntimeLibcall; +def ZDAPVST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZDLPV : RuntimeLibcall; +def ZDLPVJ : RuntimeLibcall; +def ZDLPVJST11ALIGN_VAL_T : 
RuntimeLibcall; +def ZDLPVM : RuntimeLibcall; +def ZDLPVMST11ALIGN_VAL_T : RuntimeLibcall; +def ZDLPVRKST9NOTHROW_T : RuntimeLibcall; +def ZDLPVST11ALIGN_VAL_T : RuntimeLibcall; +def ZDLPVST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAJ : RuntimeLibcall; +def ZNAJRKST9NOTHROW_T : RuntimeLibcall; +def ZNAJST11ALIGN_VAL_T : RuntimeLibcall; +def ZNAJST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAM : RuntimeLibcall; +def ZNAM12__HOT_COLD_T : RuntimeLibcall; +def ZNAMRKST9NOTHROW_T : RuntimeLibcall; +def ZNAMRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_T12__HOT_COLD_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_TRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWJ : RuntimeLibcall; +def ZNWJRKST9NOTHROW_T : RuntimeLibcall; +def ZNWJST11ALIGN_VAL_T : RuntimeLibcall; +def ZNWJST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNWM : RuntimeLibcall; +def ZNWM12__HOT_COLD_T : RuntimeLibcall; +def ZNWMRKST9NOTHROW_T : RuntimeLibcall; +def ZNWMRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_TRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def KMPC_ALLOC_SHARED : RuntimeLibcall; +def KMPC_FREE_SHARED : RuntimeLibcall; + //-------------------------------------------------------------------- // Global variable references //-------------------------------------------------------------------- @@ -1101,8 +1403,11 @@ def __memcpy_chk : RuntimeLibcallImpl; def __memmove_chk : RuntimeLibcallImpl; def __memset_chk : RuntimeLibcallImpl; +def malloc : RuntimeLibcallImpl; + // DSEPass can emit calloc if it finds a pair of malloc/memset def calloc : RuntimeLibcallImpl; +def free : RuntimeLibcallImpl; } // End let IsDefault = true @@ -1115,6 +1420,353 @@ def 
exp10l_ppcf128 : RuntimeLibcallImpl; // Stack Protector Fail def __stack_chk_fail : RuntimeLibcallImpl; +//-------------------------------------------------------------------- +// Other functions from TargetLibraryInfo +// +// TODO: These need to be organized by library and added to relevant +// systems. +/// +// -------------------------------------------------------------------- + +def __2_YAPAXI_Z : RuntimeLibcallImpl; +def __2_YAPAXIABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __2_YAPEAX_K_Z : RuntimeLibcallImpl; +def __2_YAPEAX_KAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPAX_Z : RuntimeLibcallImpl; +def __3_YAXPAXABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPAXI_Z : RuntimeLibcallImpl; +def __3_YAXPEAX_Z : RuntimeLibcallImpl; +def __3_YAXPEAXAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPEAX_K_Z + : RuntimeLibcallImpl; +def ___U_YAPAXI_Z : RuntimeLibcallImpl; +def ___U_YAPAXIABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___U_YAPEAX_K_Z + : RuntimeLibcallImpl; +def ___U_YAPEAX_KAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPAX_Z + : RuntimeLibcallImpl; +def ___V_YAXPAXABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPAXI_Z + : RuntimeLibcallImpl; +def ___V_YAXPEAX_Z + : RuntimeLibcallImpl; +def ___V_YAXPEAXAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPEAX_K_Z + : RuntimeLibcallImpl; +def _IO_getc : RuntimeLibcallImpl; +def _IO_putc : RuntimeLibcallImpl; +def _ZdaPv : RuntimeLibcallImpl; +def _ZdaPvRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZdaPvSt11align_val_t : RuntimeLibcallImpl; +def _ZdaPvSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZdaPvj : RuntimeLibcallImpl; +def _ZdaPvjSt11align_val_t : RuntimeLibcallImpl; +def _ZdaPvm : RuntimeLibcallImpl; +def _ZdaPvmSt11align_val_t : RuntimeLibcallImpl; +def _ZdlPv : RuntimeLibcallImpl; +def _ZdlPvRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZdlPvSt11align_val_t : RuntimeLibcallImpl; +def 
_ZdlPvSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZdlPvj : RuntimeLibcallImpl; +def _ZdlPvjSt11align_val_t : RuntimeLibcallImpl; +def _ZdlPvm : RuntimeLibcallImpl; +def _ZdlPvmSt11align_val_t : RuntimeLibcallImpl; +def _Znaj : RuntimeLibcallImpl; +def _ZnajRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnajSt11align_val_t : RuntimeLibcallImpl; +def _ZnajSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _Znam : RuntimeLibcallImpl; +def _Znam12__hot_cold_t : RuntimeLibcallImpl; +def _ZnamRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnamRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_t : RuntimeLibcallImpl; +def _ZnamSt11align_val_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _Znwj : RuntimeLibcallImpl; +def _ZnwjRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnwjSt11align_val_t : RuntimeLibcallImpl; +def _ZnwjSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _Znwm : RuntimeLibcallImpl; +def _Znwm12__hot_cold_t : RuntimeLibcallImpl; +def _ZnwmRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnwmRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnwmSt11align_val_t : RuntimeLibcallImpl; +def _ZnwmSt11align_val_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnwmSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def __size_returning_new : RuntimeLibcallImpl; +def __size_returning_new_hot_cold + : RuntimeLibcallImpl; +def __size_returning_new_aligned + : RuntimeLibcallImpl; +def __size_returning_new_aligned_hot_cold + : RuntimeLibcallImpl; +def __cxa_atexit : RuntimeLibcallImpl; +def atexit : RuntimeLibcallImpl; +def abort : RuntimeLibcallImpl; +def exit : RuntimeLibcallImpl; +def _Exit : RuntimeLibcallImpl; +def _ZSt9terminatev : RuntimeLibcallImpl; +def __cxa_throw : RuntimeLibcallImpl; +def 
__cxa_guard_abort : RuntimeLibcallImpl; +def __cxa_guard_acquire : RuntimeLibcallImpl; +def __cxa_guard_release : RuntimeLibcallImpl; +def __isoc99_scanf : RuntimeLibcallImpl; +def __isoc99_sscanf : RuntimeLibcallImpl; +def __kmpc_alloc_shared : RuntimeLibcallImpl; +def __kmpc_free_shared : RuntimeLibcallImpl; +def __memccpy_chk : RuntimeLibcallImpl; +def __mempcpy_chk : RuntimeLibcallImpl; +def __small_fprintf : RuntimeLibcallImpl; +def __small_printf : RuntimeLibcallImpl; +def __small_sprintf : RuntimeLibcallImpl; +def __snprintf_chk : RuntimeLibcallImpl; +def __sprintf_chk : RuntimeLibcallImpl; +def __stpcpy_chk : RuntimeLibcallImpl; +def __stpncpy_chk : RuntimeLibcallImpl; +def __strcat_chk : RuntimeLibcallImpl; +def __strcpy_chk : RuntimeLibcallImpl; +def __strdup : RuntimeLibcallImpl; +def __strlcat_chk : RuntimeLibcallImpl; +def __strlcpy_chk : RuntimeLibcallImpl; +def __strlen_chk : RuntimeLibcallImpl; +def __strncat_chk : RuntimeLibcallImpl; +def __strncpy_chk : RuntimeLibcallImpl; +def __strndup : RuntimeLibcallImpl; +def __strtok_r : RuntimeLibcallImpl; +def __vsnprintf_chk : RuntimeLibcallImpl; +def __vsprintf_chk : RuntimeLibcallImpl; +def abs : RuntimeLibcallImpl; +def access : RuntimeLibcallImpl; +def aligned_alloc : RuntimeLibcallImpl; +def atof : RuntimeLibcallImpl; +def atoi : RuntimeLibcallImpl; +def atol : RuntimeLibcallImpl; +def atoll : RuntimeLibcallImpl; +def bcmp : RuntimeLibcallImpl; +def bcopy : RuntimeLibcallImpl; +def cabs : RuntimeLibcallImpl; +def cabsf : RuntimeLibcallImpl; +defm cabsl : LibmLongDoubleLibCall; +def chmod : RuntimeLibcallImpl; +def chown : RuntimeLibcallImpl; +def clearerr : RuntimeLibcallImpl; +def closedir : RuntimeLibcallImpl; +def ctermid : RuntimeLibcallImpl; +def execl : RuntimeLibcallImpl; +def execle : RuntimeLibcallImpl; +def execlp : RuntimeLibcallImpl; +def execv : RuntimeLibcallImpl; +def execvP : RuntimeLibcallImpl; +def execve : RuntimeLibcallImpl; +def execvp : RuntimeLibcallImpl; +def execvpe : 
RuntimeLibcallImpl; +def fclose : RuntimeLibcallImpl; +def fdopen : RuntimeLibcallImpl; +def feof : RuntimeLibcallImpl; +def ferror : RuntimeLibcallImpl; +def fflush : RuntimeLibcallImpl; +def ffs : RuntimeLibcallImpl; +def ffsl : RuntimeLibcallImpl; +def ffsll : RuntimeLibcallImpl; +def fgetc : RuntimeLibcallImpl; +def fgetc_unlocked : RuntimeLibcallImpl; +def fgetpos : RuntimeLibcallImpl; +def fgets : RuntimeLibcallImpl; +def fgets_unlocked : RuntimeLibcallImpl; +def fileno : RuntimeLibcallImpl; +def fiprintf : RuntimeLibcallImpl; +def flockfile : RuntimeLibcallImpl; +def fls : RuntimeLibcallImpl; +def flsl : RuntimeLibcallImpl; +def flsll : RuntimeLibcallImpl; +def fopen : RuntimeLibcallImpl; +def fopen64 : RuntimeLibcallImpl; +def fork : RuntimeLibcallImpl; +def fprintf : RuntimeLibcallImpl; +def fputc : RuntimeLibcallImpl; +def fputc_unlocked : RuntimeLibcallImpl; +def fputs : RuntimeLibcallImpl; +def fputs_unlocked : RuntimeLibcallImpl; +def fread : RuntimeLibcallImpl; +def fread_unlocked : RuntimeLibcallImpl; +def fscanf : RuntimeLibcallImpl; +def fseek : RuntimeLibcallImpl; +def fseeko : RuntimeLibcallImpl; +def fseeko64 : RuntimeLibcallImpl; +def fsetpos : RuntimeLibcallImpl; +def fstat : RuntimeLibcallImpl; +def fstat64 : RuntimeLibcallImpl; +def fstatvfs : RuntimeLibcallImpl; +def fstatvfs64 : RuntimeLibcallImpl; +def ftell : RuntimeLibcallImpl; +def ftello : RuntimeLibcallImpl; +def ftello64 : RuntimeLibcallImpl; +def ftrylockfile : RuntimeLibcallImpl; +def funlockfile : RuntimeLibcallImpl; +def fwrite : RuntimeLibcallImpl; +def fwrite_unlocked : RuntimeLibcallImpl; +def getc : RuntimeLibcallImpl; +def getc_unlocked : RuntimeLibcallImpl; +def getchar : RuntimeLibcallImpl; +def getchar_unlocked : RuntimeLibcallImpl; +def getenv : RuntimeLibcallImpl; +def getitimer : RuntimeLibcallImpl; +def getlogin_r : RuntimeLibcallImpl; +def getpwnam : RuntimeLibcallImpl; +def gets : RuntimeLibcallImpl; +def gettimeofday : RuntimeLibcallImpl; +def htonl : 
RuntimeLibcallImpl; +def htons : RuntimeLibcallImpl; +def iprintf : RuntimeLibcallImpl; +def isascii : RuntimeLibcallImpl; +def isdigit : RuntimeLibcallImpl; +def labs : RuntimeLibcallImpl; +def lchown : RuntimeLibcallImpl; +def llabs : RuntimeLibcallImpl; +def lstat : RuntimeLibcallImpl; +def lstat64 : RuntimeLibcallImpl; +def memalign : RuntimeLibcallImpl; +def memccpy : RuntimeLibcallImpl; +def memchr : RuntimeLibcallImpl; +def memcmp : RuntimeLibcallImpl; +def mempcpy : RuntimeLibcallImpl; +def memrchr : RuntimeLibcallImpl; +def memset_pattern16 : RuntimeLibcallImpl; +def memset_pattern4 : RuntimeLibcallImpl; +def memset_pattern8 : RuntimeLibcallImpl; +def mkdir : RuntimeLibcallImpl; +def mktime : RuntimeLibcallImpl; +def ntohl : RuntimeLibcallImpl; +def ntohs : RuntimeLibcallImpl; +def open : RuntimeLibcallImpl; +def open64 : RuntimeLibcallImpl; +def opendir : RuntimeLibcallImpl; +def pclose : RuntimeLibcallImpl; +def perror : RuntimeLibcallImpl; +def popen : RuntimeLibcallImpl; +def posix_memalign : RuntimeLibcallImpl; +def pread : RuntimeLibcallImpl; +def printf : RuntimeLibcallImpl; +def putc : RuntimeLibcallImpl; +def putc_unlocked : RuntimeLibcallImpl; +def putchar : RuntimeLibcallImpl; +def putchar_unlocked : RuntimeLibcallImpl; +def puts : RuntimeLibcallImpl; +def pvalloc : RuntimeLibcallImpl; +def pwrite : RuntimeLibcallImpl; +def qsort : RuntimeLibcallImpl; +def read : RuntimeLibcallImpl; +def readlink : RuntimeLibcallImpl; +def realloc : RuntimeLibcallImpl; +def reallocf : RuntimeLibcallImpl; +def reallocarray : RuntimeLibcallImpl; +def realpath : RuntimeLibcallImpl; +def remove : RuntimeLibcallImpl; +def rename : RuntimeLibcallImpl; +def rewind : RuntimeLibcallImpl; +def rmdir : RuntimeLibcallImpl; +def scanf : RuntimeLibcallImpl; +def setbuf : RuntimeLibcallImpl; +def setitimer : RuntimeLibcallImpl; +def setvbuf : RuntimeLibcallImpl; +def siprintf : RuntimeLibcallImpl; +def snprintf : RuntimeLibcallImpl; +def sprintf : RuntimeLibcallImpl; +def 
sscanf : RuntimeLibcallImpl; +def stat : RuntimeLibcallImpl; +def stat64 : RuntimeLibcallImpl; +def statvfs : RuntimeLibcallImpl; +def statvfs64 : RuntimeLibcallImpl; +def stpcpy : RuntimeLibcallImpl; +def stpncpy : RuntimeLibcallImpl; +def strcasecmp : RuntimeLibcallImpl; +def strcat : RuntimeLibcallImpl; +def strchr : RuntimeLibcallImpl; +def strcmp : RuntimeLibcallImpl; +def strcoll : RuntimeLibcallImpl; +def strcpy : RuntimeLibcallImpl; +def strcspn : RuntimeLibcallImpl; +def strdup : RuntimeLibcallImpl; +def strlcat : RuntimeLibcallImpl; +def strlcpy : RuntimeLibcallImpl; +def strlen : RuntimeLibcallImpl; +def strncasecmp : RuntimeLibcallImpl; +def strncat : RuntimeLibcallImpl; +def strncmp : RuntimeLibcallImpl; +def strncpy : RuntimeLibcallImpl; +def strndup : RuntimeLibcallImpl; +def strnlen : RuntimeLibcallImpl; +def strpbrk : RuntimeLibcallImpl; +def strrchr : RuntimeLibcallImpl; +def strspn : RuntimeLibcallImpl; +def strstr : RuntimeLibcallImpl; +def strtod : RuntimeLibcallImpl; +def strtof : RuntimeLibcallImpl; +def strtok : RuntimeLibcallImpl; +def strtok_r : RuntimeLibcallImpl; +def strtol : RuntimeLibcallImpl; +def strtold : RuntimeLibcallImpl; +def strtoll : RuntimeLibcallImpl; +def strtoul : RuntimeLibcallImpl; +def strtoull : RuntimeLibcallImpl; +def strxfrm : RuntimeLibcallImpl; +def system : RuntimeLibcallImpl; +def times : RuntimeLibcallImpl; +def tmpfile : RuntimeLibcallImpl; +def tmpfile64 : RuntimeLibcallImpl; +def toascii : RuntimeLibcallImpl; +def uname : RuntimeLibcallImpl; +def ungetc : RuntimeLibcallImpl; +def unlink : RuntimeLibcallImpl; +def unsetenv : RuntimeLibcallImpl; +def utime : RuntimeLibcallImpl; +def utimes : RuntimeLibcallImpl; +def valloc : RuntimeLibcallImpl; +def vec_calloc : RuntimeLibcallImpl; +def vec_free : RuntimeLibcallImpl; +def vec_malloc : RuntimeLibcallImpl; +def vec_realloc : RuntimeLibcallImpl; +def vfprintf : RuntimeLibcallImpl; +def vfscanf : RuntimeLibcallImpl; +def vprintf : RuntimeLibcallImpl; +def vscanf 
: RuntimeLibcallImpl; +def vsnprintf : RuntimeLibcallImpl; +def vsprintf : RuntimeLibcallImpl; +def vsscanf : RuntimeLibcallImpl; +def wcslen : RuntimeLibcallImpl; +def write : RuntimeLibcallImpl; + //-------------------------------------------------------------------- // compiler-rt/libgcc but 64-bit only, not available by default //-------------------------------------------------------------------- @@ -1326,6 +1978,15 @@ defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret, darwinHasSinCosStret>; defvar DarwinExp10 = LibcallImpls<(add __exp10f, __exp10), darwinHasExp10>; +defvar DarwinMemsetPattern = LibcallImpls<(add memset_pattern4, + memset_pattern8, + memset_pattern16), + darwinHasMemsetPattern>; + +defvar MacOSUnlockedIO = LibcallImpls<(add + getc_unlocked, getchar_unlocked, putc_unlocked, putchar_unlocked), + isMacOSX>; + defvar SecurityCheckCookieIfWinMSVC = LibcallImpls<(add __security_check_cookie, __security_cookie), isWindowsMSVCOrItaniumEnvironment>; @@ -1483,7 +2144,8 @@ def AArch64SystemLibrary : SystemRuntimeLibrary< AArch64LibcallImpls, LibcallImpls<(add Int128RTLibcalls), isAArch64_ILP64>, LibcallImpls<(add bzero), isOSDarwin>, - DarwinExp10, DarwinSinCosStret, + DarwinExp10, DarwinSinCosStret, DarwinMemsetPattern, + MacOSUnlockedIO, LibmHasSinCosF32, LibmHasSinCosF64, LibmHasSinCosF128, DefaultLibmExp10, DefaultStackProtector, @@ -1953,7 +2615,7 @@ def ARMSystemLibrary WindowARMFPIntCasts, SecurityCheckCookieIfWinMSVC, AEABIDivRemCalls, - DarwinSinCosStret, DarwinExp10, + DarwinSinCosStret, DarwinExp10, DarwinMemsetPattern, LibmHasSinCosF32, LibmHasSinCosF64, LibmHasSinCosF128, DefaultLibmExp10, @@ -2638,7 +3300,7 @@ defvar MemChkLibcalls = [__memcpy_chk, __memset_chk, __memmove_chk]; defvar X86CommonLibcalls = (add (sub WinDefaultLibcallImpls, WindowsDivRemMulLibcallOverrides, MemChkLibcalls), - DarwinSinCosStret, DarwinExp10, + DarwinSinCosStret, DarwinExp10, DarwinMemsetPattern, MacOSUnlockedIO, X86_F128_Libcalls, 
LibmHasSinCosF80, // FIXME: Depends on long double SinCosF32F64Libcalls, @@ -2687,6 +3349,7 @@ def XCoreSystemLibrary (add DefaultRuntimeLibcallImpls, exp10f, exp10, exp10l_f128, __memcpy_4, + iprintf, siprintf, fiprintf, LibcallImpls<(add LibmF128Libcalls, LibmF128FiniteLibcalls), isGNUEnvironment> )>; @@ -2795,6 +3458,7 @@ def SystemZZOSSystemLibrary def emscripten_return_address : RuntimeLibcallImpl; def isWasm : RuntimeLibcallPredicate<"TT.isWasm()">; +def isOSEmscripten : RuntimeLibcallPredicate<[{TT.isOSEmscripten()}]>; // Define the emscripten name for return address helper. // TODO: when implementing other Wasm backends, make this generic or only do @@ -2806,6 +3470,9 @@ def WasmSystemLibrary exp10f, exp10, _Unwind_CallPersonality, emscripten_return_address, + LibcallImpls<(add __small_printf, + __small_sprintf, + __small_fprintf), isOSEmscripten>, __stack_chk_fail, __stack_chk_guard)>; //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h index c2f15b81da02c..5722213347d51 100644 --- a/llvm/include/llvm/MC/MCInstrDesc.h +++ b/llvm/include/llvm/MC/MCInstrDesc.h @@ -49,8 +49,7 @@ enum OperandConstraint { /// private, all access should go through the MCOperandInfo accessors. /// See the accessors for a description of what these are. enum OperandFlags { - LookupPtrRegClass = 0, - LookupRegClassByHwMode, + LookupRegClassByHwMode = 0, Predicate, OptionalDef, BranchTarget @@ -90,9 +89,6 @@ class MCOperandInfo { /// operand is a register. If LookupRegClassByHwMode is set, then this is an /// index into a table in TargetInstrInfo or MCInstrInfo which contains the /// real register class ID. - /// - /// If isLookupPtrRegClass is set, then this is an index that is passed to - /// TargetRegisterInfo::getPointerRegClass(x) to get a dynamic register class. int16_t RegClass; /// These are flags from the MCOI::OperandFlags enum. 
@@ -104,13 +100,6 @@ class MCOperandInfo { /// Operand constraints (see OperandConstraint enum). uint16_t Constraints; - /// Set if this operand is a pointer value and it requires a callback - /// to look up its register class. - // TODO: Deprecated in favor of isLookupRegClassByHwMode - bool isLookupPtrRegClass() const { - return Flags & (1 << MCOI::LookupPtrRegClass); - } - /// Set if this operand is a value that requires the current hwmode to look up /// its register class. bool isLookupRegClassByHwMode() const { diff --git a/llvm/include/llvm/MC/MCObjectFileInfo.h b/llvm/include/llvm/MC/MCObjectFileInfo.h index ed7f462c3c598..51b7d73d46036 100644 --- a/llvm/include/llvm/MC/MCObjectFileInfo.h +++ b/llvm/include/llvm/MC/MCObjectFileInfo.h @@ -29,10 +29,6 @@ class MCSection; class LLVM_ABI MCObjectFileInfo { protected: - /// True if target object file supports a weak_definition of constant 0 for an - /// omitted EH frame. - bool SupportsWeakOmittedEHFrame = false; - /// True if the target object file supports emitting a compact unwind section /// without an associated EH frame section. bool SupportsCompactUnwindWithoutEHFrame = false; @@ -260,9 +256,6 @@ class LLVM_ABI MCObjectFileInfo { virtual ~MCObjectFileInfo(); MCContext &getContext() const { return *Ctx; } - bool getSupportsWeakOmittedEHFrame() const { - return SupportsWeakOmittedEHFrame; - } bool getSupportsCompactUnwindWithoutEHFrame() const { return SupportsCompactUnwindWithoutEHFrame; } diff --git a/llvm/include/llvm/MC/MCObjectStreamer.h b/llvm/include/llvm/MC/MCObjectStreamer.h index d9aecd881b51c..3c5a6ce42e4f8 100644 --- a/llvm/include/llvm/MC/MCObjectStreamer.h +++ b/llvm/include/llvm/MC/MCObjectStreamer.h @@ -77,7 +77,7 @@ class MCObjectStreamer : public MCStreamer { /// Object streamers require the integrated assembler. 
bool isIntegratedAssemblerRequired() const override { return true; } - void emitFrames(MCAsmBackend *MAB); + void emitFrames(); MCSymbol *emitCFILabel() override; void emitCFISections(bool EH, bool Debug, bool SFrame) override; diff --git a/llvm/include/llvm/MC/MCSymbol.h b/llvm/include/llvm/MC/MCSymbol.h index e31d0374baf4a..eef248354b70f 100644 --- a/llvm/include/llvm/MC/MCSymbol.h +++ b/llvm/include/llvm/MC/MCSymbol.h @@ -383,6 +383,8 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MCSymbol &Sym) { return OS; } +bool isRangeRelaxable(const MCSymbol *Begin, const MCSymbol *End); + } // end namespace llvm #endif // LLVM_MC_MCSYMBOL_H diff --git a/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h b/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h index adfdccdb5ab77..168131b43cca8 100644 --- a/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h +++ b/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h @@ -22,6 +22,7 @@ namespace llvm { class MCTargetOptions; enum class EmitDwarfUnwindType; +class StringRef; namespace mc { @@ -62,9 +63,9 @@ LLVM_ABI bool getX86RelaxRelocations(); LLVM_ABI bool getX86Sse2Avx(); -LLVM_ABI std::string getABIName(); +LLVM_ABI StringRef getABIName(); -LLVM_ABI std::string getAsSecureLogFile(); +LLVM_ABI StringRef getAsSecureLogFile(); /// Create this object with static storage to register mc-related command /// line options. 
diff --git a/llvm/include/llvm/ProfileData/SampleProf.h b/llvm/include/llvm/ProfileData/SampleProf.h index 3dd34aba2d716..05f1b568b0643 100644 --- a/llvm/include/llvm/ProfileData/SampleProf.h +++ b/llvm/include/llvm/ProfileData/SampleProf.h @@ -1072,7 +1072,7 @@ class FunctionSamples { TypeCountMap &TypeCounts = getTypeSamplesAt(Loc); bool Overflowed = false; - for (const auto [Type, Count] : Other) { + for (const auto &[Type, Count] : Other) { FunctionId TypeId(Type); bool RowOverflow = false; TypeCounts[TypeId] = SaturatingMultiplyAdd( diff --git a/llvm/include/llvm/SandboxIR/Constant.h b/llvm/include/llvm/SandboxIR/Constant.h index 6f682a7059d10..2fe923f6c3866 100644 --- a/llvm/include/llvm/SandboxIR/Constant.h +++ b/llvm/include/llvm/SandboxIR/Constant.h @@ -1363,7 +1363,8 @@ class ConstantPtrAuth final : public Constant { public: /// Return a pointer signed with the specified parameters. LLVM_ABI static ConstantPtrAuth *get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc); + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol); /// The pointer that is signed in this ptrauth signed pointer. LLVM_ABI Constant *getPointer() const; @@ -1378,6 +1379,8 @@ class ConstantPtrAuth final : public Constant { /// the only global-initializer user of the ptrauth signed pointer. LLVM_ABI Constant *getAddrDiscriminator() const; + Constant *getDeactivationSymbol() const; + /// Whether there is any non-null address discriminator. 
bool hasAddressDiscriminator() const { return cast(Val)->hasAddressDiscriminator(); diff --git a/llvm/include/llvm/Support/AllocToken.h b/llvm/include/llvm/Support/AllocToken.h index e40d8163a9d7c..1dc3a0cacef24 100644 --- a/llvm/include/llvm/Support/AllocToken.h +++ b/llvm/include/llvm/Support/AllocToken.h @@ -46,6 +46,9 @@ inline constexpr AllocTokenMode DefaultAllocTokenMode = LLVM_ABI std::optional getAllocTokenModeFromString(StringRef Name); +/// Returns the canonical string name for the given AllocTokenMode. +LLVM_ABI StringRef getAllocTokenModeAsString(AllocTokenMode Mode); + /// Metadata about an allocation used to generate a token ID. struct AllocTokenMetadata { SmallString<64> TypeName; diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td index 6abde996e6dc8..ef2ccb0abeb1e 100644 --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -61,7 +61,7 @@ class HwModeSelect Ms, int ObjectsLength> { // objects could be used. This is specifically applicable to selection // patterns. class ValueTypeByHwMode Ms, list Ts> - : HwModeSelect, ValueType<0, 0> { + : HwModeSelect, ValueType<0, "INVALID_SIMPLE_VALUE_TYPE"> { // The length of this list must be the same as the length of Ms. list Objects = Ts; } @@ -73,7 +73,7 @@ class ValueTypeByHwMode Ms, list Ts> // patterns. class PtrValueTypeByHwMode : HwModeSelect, - PtrValueType, addrspace> { + PtrValueType, addrspace> { // The length of this list must be the same as the length of Ms. list Objects = scalar.Objects; } @@ -694,6 +694,7 @@ class Instruction : InstructionEncoding { // If so, make sure to override // TargetInstrInfo::getInsertSubregLikeInputs. bit variadicOpsAreDefs = false; // Are variadic operands definitions? + bit supportsDeactivationSymbol = false; // Does the instruction have side effects that are not captured by any // operands of the instruction or other flags? 
@@ -918,16 +919,23 @@ def slice; def encoder; def decoder; -/// PointerLikeRegClass - Values that are designed to have pointer width are -/// derived from this. TableGen treats the register class as having a symbolic -/// type that it doesn't know, and resolves the actual regclass to use by using -/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time. -/// -/// This is deprecated in favor of RegClassByHwMode. +/// PointerLikeRegClass - Pseudoinstruction operands that are designed +/// to have pointer width are derived from this. This should only be +/// used by StandardPseudoInstruction instructions. No target specific +/// instruction should use this. class PointerLikeRegClass { int RegClassKind = Kind; } +/// ptr_rc definition - Mark this operand as being a pointer value +/// whose register class needs to be defined by the target. Targets +/// should provide instruction definition overrides which substitute +/// the uses of this with the backend defined RegisterClass or +/// RegClassByHwMode to use for pointer virtual registers for a +/// particular opcode (typically by defining a substitute instruction +/// with RemapPointerOperands). +def ptr_rc : PointerLikeRegClass<0>; + /// RegClassByHwMode - Operands that change the register class based /// on the subtarget are derived from this. TableGen /// treats the register class as having a symbolic kind that it @@ -941,13 +949,6 @@ class RegClassByHwMode Modes, list Objects = RegClasses; } -/// ptr_rc definition - Mark this operand as being a pointer value whose -/// register class is resolved dynamically via a callback to TargetInstrInfo. -/// FIXME: We should probably change this to a class which contain a list of -/// flags. But currently we have but one flag. -// Deprecated, use RegClassByHwMode instead. -def ptr_rc : PointerLikeRegClass<0>; /// unknown definition - Mark this operand as being of unknown type, causing /// it to be resolved by inference in the context it is used. 
class unknown_class; diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.def b/llvm/include/llvm/TargetParser/X86TargetParser.def index 826752b088bcd..09592bcea27f4 100644 --- a/llvm/include/llvm/TargetParser/X86TargetParser.def +++ b/llvm/include/llvm/TargetParser/X86TargetParser.def @@ -121,16 +121,14 @@ X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_PANTHERLAKE, "wildcatlake") #undef X86_CPU_SUBTYPE_ALIAS #undef X86_CPU_SUBTYPE -// This macro is used for cpu types present in compiler-rt/libgcc. The third -// parameter PRIORITY is as required by the attribute 'target' checking. Note -// that not all are supported/prioritized by GCC, so synchronization with GCC's -// implementation may require changing some existing values. -// -// We cannot just re-sort the list though because its order is dictated by the -// order of bits in CodeGenFunction::GetX86CpuSupportsMask. -// We cannot re-adjust the position of X86_FEATURE_COMPAT at the whole list. +// X86_FEATURE_COMPAT is used for cpu types present in compiler-rt/libgcc (i.e. +// types we can multiversion on). The third parameter PRIORITY is required +// by the attribute 'target' checking. + +// Order of bits has to match what's implemented in compiler-rt/libgcc. That's what the +// ABI_VALUE is for - CodeGenFunction::GetX86CpuSupportsMask uses it. 
#ifndef X86_FEATURE_COMPAT -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) X86_FEATURE(ENUM, STR) +#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) X86_FEATURE(ENUM, STR) #endif #ifndef X86_FEATURE @@ -138,139 +136,138 @@ X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_PANTHERLAKE, "wildcatlake") #endif #ifndef X86_MICROARCH_LEVEL -#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY) +#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY, ABI_VALUE) #endif -X86_FEATURE_COMPAT(CMOV, "cmov", 0) -X86_FEATURE_COMPAT(MMX, "mmx", 1) -X86_FEATURE_COMPAT(POPCNT, "popcnt", 9) -X86_FEATURE_COMPAT(SSE, "sse", 2) -X86_FEATURE_COMPAT(SSE2, "sse2", 3) -X86_FEATURE_COMPAT(SSE3, "sse3", 4) -X86_FEATURE_COMPAT(SSSE3, "ssse3", 5) -X86_FEATURE_COMPAT(SSE4_1, "sse4.1", 7) -X86_FEATURE_COMPAT(SSE4_2, "sse4.2", 8) -X86_FEATURE_COMPAT(AVX, "avx", 12) -X86_FEATURE_COMPAT(AVX2, "avx2", 18) -X86_FEATURE_COMPAT(SSE4_A, "sse4a", 6) -X86_FEATURE_COMPAT(FMA4, "fma4", 14) -X86_FEATURE_COMPAT(XOP, "xop", 15) -X86_FEATURE_COMPAT(FMA, "fma", 16) -X86_FEATURE_COMPAT(AVX512F, "avx512f", 19) -X86_FEATURE_COMPAT(BMI, "bmi", 13) -X86_FEATURE_COMPAT(BMI2, "bmi2", 17) -X86_FEATURE_COMPAT(AES, "aes", 10) -X86_FEATURE_COMPAT(PCLMUL, "pclmul", 11) -X86_FEATURE_COMPAT(AVX512VL, "avx512vl", 20) -X86_FEATURE_COMPAT(AVX512BW, "avx512bw", 21) -X86_FEATURE_COMPAT(AVX512DQ, "avx512dq", 22) -X86_FEATURE_COMPAT(AVX512CD, "avx512cd", 23) -X86_FEATURE (NF, "nf") -X86_FEATURE (CF, "cf") -X86_FEATURE_COMPAT(AVX512VBMI, "avx512vbmi", 24) -X86_FEATURE_COMPAT(AVX512IFMA, "avx512ifma", 25) -X86_FEATURE_COMPAT(AVX5124VNNIW, "avx5124vnniw", 26) -X86_FEATURE_COMPAT(AVX5124FMAPS, "avx5124fmaps", 27) -X86_FEATURE_COMPAT(AVX512VPOPCNTDQ, "avx512vpopcntdq", 28) -X86_FEATURE_COMPAT(AVX512VBMI2, "avx512vbmi2", 29) -X86_FEATURE_COMPAT(GFNI, "gfni", 30) -X86_FEATURE_COMPAT(VPCLMULQDQ, "vpclmulqdq", 31) -X86_FEATURE_COMPAT(AVX512VNNI, "avx512vnni", 32) -X86_FEATURE_COMPAT(AVX512BITALG, "avx512bitalg", 33) -X86_FEATURE_COMPAT(AVX512BF16, 
"avx512bf16", 34) -X86_FEATURE_COMPAT(AVX512VP2INTERSECT, "avx512vp2intersect", 35) -// Below Features has some missings comparing to gcc, it's because gcc has some -// not one-to-one mapped in llvm. +// These are the feature we can multiversion on. There are gaps because LLVM +// doesn't support each feature that GCC does. +X86_FEATURE_COMPAT(CMOV, "cmov", 0, 0) +X86_FEATURE_COMPAT(MMX, "mmx", 1, 1) +X86_FEATURE_COMPAT(POPCNT, "popcnt", 9, 2) +X86_FEATURE_COMPAT(SSE, "sse", 2, 3) +X86_FEATURE_COMPAT(SSE2, "sse2", 3, 4) +X86_FEATURE_COMPAT(SSE3, "sse3", 4, 5) +X86_FEATURE_COMPAT(SSSE3, "ssse3", 5, 6) +X86_FEATURE_COMPAT(SSE4_1, "sse4.1", 7, 7) +X86_FEATURE_COMPAT(SSE4_2, "sse4.2", 8, 8) +X86_FEATURE_COMPAT(AVX, "avx", 12, 9) +X86_FEATURE_COMPAT(AVX2, "avx2", 18, 10) +X86_FEATURE_COMPAT(SSE4_A, "sse4a", 6, 11) +X86_FEATURE_COMPAT(FMA4, "fma4", 14, 12) +X86_FEATURE_COMPAT(XOP, "xop", 15, 13) +X86_FEATURE_COMPAT(FMA, "fma", 16, 14) +X86_FEATURE_COMPAT(AVX512F, "avx512f", 19, 15) +X86_FEATURE_COMPAT(BMI, "bmi", 13, 16) +X86_FEATURE_COMPAT(BMI2, "bmi2", 17, 17) +X86_FEATURE_COMPAT(AES, "aes", 10, 18) +X86_FEATURE_COMPAT(PCLMUL, "pclmul", 11, 19) +X86_FEATURE_COMPAT(AVX512VL, "avx512vl", 20, 20) +X86_FEATURE_COMPAT(AVX512BW, "avx512bw", 21, 21) +X86_FEATURE_COMPAT(AVX512DQ, "avx512dq", 22, 22) +X86_FEATURE_COMPAT(AVX512CD, "avx512cd", 23, 23) +X86_FEATURE_COMPAT(AVX512VBMI, "avx512vbmi", 24, 26) +X86_FEATURE_COMPAT(AVX512IFMA, "avx512ifma", 25, 27) +X86_FEATURE_COMPAT(AVX512VPOPCNTDQ, "avx512vpopcntdq", 26, 30) +X86_FEATURE_COMPAT(AVX512VBMI2, "avx512vbmi2", 27, 31) +X86_FEATURE_COMPAT(GFNI, "gfni", 28, 32) +X86_FEATURE_COMPAT(VPCLMULQDQ, "vpclmulqdq", 29, 33) +X86_FEATURE_COMPAT(AVX512VNNI, "avx512vnni", 30, 34) +X86_FEATURE_COMPAT(AVX512BITALG, "avx512bitalg", 31, 35) +X86_FEATURE_COMPAT(AVX512BF16, "avx512bf16", 32, 36) +X86_FEATURE_COMPAT(AVX512VP2INTERSECT, "avx512vp2intersect", 33, 37) +X86_FEATURE_COMPAT(ADX, "adx", 0, 40) +X86_FEATURE_COMPAT(CLDEMOTE, "cldemote", 
0, 42) +X86_FEATURE_COMPAT(CLFLUSHOPT, "clflushopt", 0, 43) +X86_FEATURE_COMPAT(CLWB, "clwb", 0, 44) +X86_FEATURE_COMPAT(CLZERO, "clzero", 0, 45) +X86_FEATURE_COMPAT(CMPXCHG16B, "cx16", 0, 46) +X86_FEATURE_COMPAT(ENQCMD, "enqcmd", 0, 48) +X86_FEATURE_COMPAT(F16C, "f16c", 0, 49) +X86_FEATURE_COMPAT(FSGSBASE, "fsgsbase", 0, 50) +X86_FEATURE_COMPAT(SAHF, "sahf", 0, 54) +X86_FEATURE_COMPAT(64BIT, "64bit", 0, 55) // Also known as "LM" +X86_FEATURE_COMPAT(LWP, "lwp", 0, 56) +X86_FEATURE_COMPAT(LZCNT, "lzcnt", 0, 57) +X86_FEATURE_COMPAT(MOVBE, "movbe", 0, 58) +X86_FEATURE_COMPAT(MOVDIR64B, "movdir64b", 0, 59) +X86_FEATURE_COMPAT(MOVDIRI, "movdiri", 0, 60) +X86_FEATURE_COMPAT(MWAITX, "mwaitx", 0, 61) +X86_FEATURE_COMPAT(PCONFIG, "pconfig", 0, 63) +X86_FEATURE_COMPAT(PKU, "pku", 0, 64) +X86_FEATURE_COMPAT(PRFCHW, "prfchw", 0, 66) +X86_FEATURE_COMPAT(PTWRITE, "ptwrite", 0, 67) +X86_FEATURE_COMPAT(RDPID, "rdpid", 0, 68) +X86_FEATURE_COMPAT(RDRND, "rdrnd", 0, 69) +X86_FEATURE_COMPAT(RDSEED, "rdseed", 0, 70) +X86_FEATURE_COMPAT(RTM, "rtm", 0, 71) +X86_FEATURE_COMPAT(SERIALIZE, "serialize", 0, 72) +X86_FEATURE_COMPAT(SGX, "sgx", 0, 73) +X86_FEATURE_COMPAT(SHA, "sha", 0, 74) +X86_FEATURE_COMPAT(SHSTK, "shstk", 0, 75) +X86_FEATURE_COMPAT(TBM, "tbm", 0, 76) +X86_FEATURE_COMPAT(TSXLDTRK, "tsxldtrk", 0, 77) +X86_FEATURE_COMPAT(VAES, "vaes", 0, 78) +X86_FEATURE_COMPAT(WAITPKG, "waitpkg", 0, 79) +X86_FEATURE_COMPAT(WBNOINVD, "wbnoinvd", 0, 80) +X86_FEATURE_COMPAT(XSAVE, "xsave", 0, 81) +X86_FEATURE_COMPAT(XSAVEC, "xsavec", 0, 82) +X86_FEATURE_COMPAT(XSAVEOPT, "xsaveopt", 0, 83) +X86_FEATURE_COMPAT(XSAVES, "xsaves", 0, 84) +X86_FEATURE_COMPAT(AMX_TILE, "amx-tile", 0, 85) +X86_FEATURE_COMPAT(AMX_INT8, "amx-int8", 0, 86) +X86_FEATURE_COMPAT(AMX_BF16, "amx-bf16", 0, 87) +X86_FEATURE_COMPAT(UINTR, "uintr", 0, 88) +X86_FEATURE_COMPAT(HRESET, "hreset", 0, 89) +X86_FEATURE_COMPAT(KL, "kl", 0, 90) +X86_FEATURE_COMPAT(WIDEKL, "widekl", 0, 92) +X86_FEATURE_COMPAT(AVXVNNI, "avxvnni", 0, 93) 
+X86_FEATURE_COMPAT(AVX512FP16, "avx512fp16", 0, 94) +X86_MICROARCH_LEVEL(X86_64_BASELINE, "x86-64", 0, 95) +X86_MICROARCH_LEVEL(X86_64_V2, "x86-64-v2", 0, 96) +X86_MICROARCH_LEVEL(X86_64_V3, "x86-64-v3", 0, 97) +X86_MICROARCH_LEVEL(X86_64_V4, "x86-64-v4", 0, 98) +X86_FEATURE_COMPAT(AVXIFMA, "avxifma", 0, 99) +X86_FEATURE_COMPAT(AVXVNNIINT8, "avxvnniint8", 0, 100) +X86_FEATURE_COMPAT(AVXNECONVERT, "avxneconvert", 0, 101) +X86_FEATURE_COMPAT(CMPCCXADD, "cmpccxadd", 0, 102) +X86_FEATURE_COMPAT(AMX_FP16, "amx-fp16", 0, 103) +X86_FEATURE_COMPAT(PREFETCHI, "prefetchi", 0, 104) +X86_FEATURE_COMPAT(RAOINT, "raoint", 0, 105) +X86_FEATURE_COMPAT(AMX_COMPLEX, "amx-complex", 0, 106) +X86_FEATURE_COMPAT(AVXVNNIINT16, "avxvnniint16", 0, 107) +X86_FEATURE_COMPAT(SM3, "sm3", 0, 108) +X86_FEATURE_COMPAT(SHA512, "sha512", 0, 109) +X86_FEATURE_COMPAT(SM4, "sm4", 0, 110) +X86_FEATURE_COMPAT(APXF, "apxf", 0, 111) +X86_FEATURE_COMPAT(USERMSR, "usermsr", 0, 112) +X86_FEATURE_COMPAT(AVX10_1, "avx10.1", 34, 114) +X86_FEATURE_COMPAT(AVX10_2, "avx10.2", 35, 116) +X86_FEATURE_COMPAT(AMX_AVX512, "amx-avx512", 0, 117) +X86_FEATURE_COMPAT(AMX_TF32, "amx-tf32", 0, 118) +X86_FEATURE_COMPAT(AMX_FP8, "amx-fp8", 0, 120) +X86_FEATURE_COMPAT(MOVRS, "movrs", 0, 121) +X86_FEATURE_COMPAT(AMX_MOVRS, "amx-movrs", 0, 122) + +// Features we don't multiversion on. +X86_FEATURE (NF, "nf") +X86_FEATURE (CF, "cf") +X86_FEATURE (AVX5124VNNIW, "avx5124vnniw") +X86_FEATURE (AVX5124FMAPS, "avx5124fmaps") +X86_FEATURE (CMPXCHG8B, "cx8") +X86_FEATURE (CRC32, "crc32") +X86_FEATURE (INVPCID, "invpcid") +X86_FEATURE (RDPRU, "rdpru") +X86_FEATURE (VZEROUPPER, "vzeroupper") +X86_FEATURE (X87, "x87") +X86_FEATURE (EVEX512, "evex512") +X86_FEATURE (FXSR, "fxsr") +X86_FEATURE (CCMP, "ccmp") +X86_FEATURE (Push2Pop2, "push2pop2") +X86_FEATURE (PPX, "ppx") +X86_FEATURE (NDD, "ndd") +X86_FEATURE (EGPR, "egpr") +X86_FEATURE (ZU, "zu") -// FIXME: dummy features were added to keep the numeric values of later features -// stable. 
Since the values need to be ABI stable, they should be changed to -// have explicitly assigned values, and then these dummy features removed. -X86_FEATURE (DUMMYFEATURE1, "__dummyfeature1") -X86_FEATURE (DUMMYFEATURE2, "__dummyfeature2") -X86_FEATURE_COMPAT(ADX, "adx", 0) -X86_FEATURE (64BIT, "64bit") -X86_FEATURE_COMPAT(CLDEMOTE, "cldemote", 0) -X86_FEATURE_COMPAT(CLFLUSHOPT, "clflushopt", 0) -X86_FEATURE_COMPAT(CLWB, "clwb", 0) -X86_FEATURE_COMPAT(CLZERO, "clzero", 0) -X86_FEATURE_COMPAT(CMPXCHG16B, "cx16", 0) -X86_FEATURE (CMPXCHG8B, "cx8") -X86_FEATURE_COMPAT(ENQCMD, "enqcmd", 0) -X86_FEATURE_COMPAT(F16C, "f16c", 0) -X86_FEATURE_COMPAT(FSGSBASE, "fsgsbase", 0) -X86_FEATURE (CRC32, "crc32") -X86_FEATURE (INVPCID, "invpcid") -X86_FEATURE (RDPRU, "rdpru") -X86_FEATURE (SAHF, "sahf") -X86_FEATURE (VZEROUPPER, "vzeroupper") -X86_FEATURE_COMPAT(LWP, "lwp", 0) -X86_FEATURE_COMPAT(LZCNT, "lzcnt", 0) -X86_FEATURE_COMPAT(MOVBE, "movbe", 0) -X86_FEATURE_COMPAT(MOVDIR64B, "movdir64b", 0) -X86_FEATURE_COMPAT(MOVDIRI, "movdiri", 0) -X86_FEATURE_COMPAT(MWAITX, "mwaitx", 0) -X86_FEATURE (X87, "x87") -X86_FEATURE_COMPAT(PCONFIG, "pconfig", 0) -X86_FEATURE_COMPAT(PKU, "pku", 0) -X86_FEATURE (EVEX512, "evex512") -X86_FEATURE_COMPAT(PRFCHW, "prfchw", 0) -X86_FEATURE_COMPAT(PTWRITE, "ptwrite", 0) -X86_FEATURE_COMPAT(RDPID, "rdpid", 0) -X86_FEATURE_COMPAT(RDRND, "rdrnd", 0) -X86_FEATURE_COMPAT(RDSEED, "rdseed", 0) -X86_FEATURE_COMPAT(RTM, "rtm", 0) -X86_FEATURE_COMPAT(SERIALIZE, "serialize", 0) -X86_FEATURE_COMPAT(SGX, "sgx", 0) -X86_FEATURE_COMPAT(SHA, "sha", 0) -X86_FEATURE_COMPAT(SHSTK, "shstk", 0) -X86_FEATURE_COMPAT(TBM, "tbm", 0) -X86_FEATURE_COMPAT(TSXLDTRK, "tsxldtrk", 0) -X86_FEATURE_COMPAT(VAES, "vaes", 0) -X86_FEATURE_COMPAT(WAITPKG, "waitpkg", 0) -X86_FEATURE_COMPAT(WBNOINVD, "wbnoinvd", 0) -X86_FEATURE_COMPAT(XSAVE, "xsave", 0) -X86_FEATURE_COMPAT(XSAVEC, "xsavec", 0) -X86_FEATURE_COMPAT(XSAVEOPT, "xsaveopt", 0) -X86_FEATURE_COMPAT(XSAVES, "xsaves", 0) 
-X86_FEATURE_COMPAT(AMX_TILE, "amx-tile", 0) -X86_FEATURE_COMPAT(AMX_INT8, "amx-int8", 0) -X86_FEATURE_COMPAT(AMX_BF16, "amx-bf16", 0) -X86_FEATURE_COMPAT(UINTR, "uintr", 0) -X86_FEATURE_COMPAT(HRESET, "hreset", 0) -X86_FEATURE_COMPAT(KL, "kl", 0) -X86_FEATURE (FXSR, "fxsr") -X86_FEATURE_COMPAT(WIDEKL, "widekl", 0) -X86_FEATURE_COMPAT(AVXVNNI, "avxvnni", 0) -X86_FEATURE_COMPAT(AVX512FP16, "avx512fp16", 0) -X86_FEATURE (CCMP, "ccmp") -X86_FEATURE (Push2Pop2, "push2pop2") -X86_FEATURE (PPX, "ppx") -X86_FEATURE (NDD, "ndd") -X86_FEATURE_COMPAT(AVXIFMA, "avxifma", 0) -X86_FEATURE_COMPAT(AVXVNNIINT8, "avxvnniint8", 0) -X86_FEATURE_COMPAT(AVXNECONVERT, "avxneconvert", 0) -X86_FEATURE_COMPAT(CMPCCXADD, "cmpccxadd", 0) -X86_FEATURE_COMPAT(AMX_FP16, "amx-fp16", 0) -X86_FEATURE_COMPAT(PREFETCHI, "prefetchi", 0) -X86_FEATURE_COMPAT(RAOINT, "raoint", 0) -X86_FEATURE_COMPAT(AMX_COMPLEX, "amx-complex", 0) -X86_FEATURE_COMPAT(AVXVNNIINT16, "avxvnniint16", 0) -X86_FEATURE_COMPAT(SM3, "sm3", 0) -X86_FEATURE_COMPAT(SHA512, "sha512", 0) -X86_FEATURE_COMPAT(SM4, "sm4", 0) -X86_FEATURE (EGPR, "egpr") -X86_FEATURE_COMPAT(USERMSR, "usermsr", 0) -X86_FEATURE_COMPAT(AVX10_1, "avx10.1", 36) -X86_FEATURE (DUMMYFEATURE3, "__dummyfeature3") -X86_FEATURE_COMPAT(AVX10_2, "avx10.2", 37) -X86_FEATURE (DUMMYFEATURE4, "__dummyfeature4") -//FIXME: make MOVRS _COMPAT defined when gcc landed relate patch. -X86_FEATURE (MOVRS, "movrs") -X86_FEATURE (ZU, "zu") -X86_FEATURE (AMX_FP8, "amx-fp8") -X86_FEATURE (AMX_MOVRS, "amx-movrs") -X86_FEATURE (AMX_AVX512, "amx-avx512") -X86_FEATURE (AMX_TF32, "amx-tf32") // These features aren't really CPU features, but the frontend can set them. 
X86_FEATURE (RETPOLINE_EXTERNAL_THUNK, "retpoline-external-thunk") X86_FEATURE (RETPOLINE_INDIRECT_BRANCHES, "retpoline-indirect-branches") @@ -278,11 +275,9 @@ X86_FEATURE (RETPOLINE_INDIRECT_CALLS, "retpoline-indirect-calls") X86_FEATURE (LVI_CFI, "lvi-cfi") X86_FEATURE (LVI_LOAD_HARDENING, "lvi-load-hardening") -X86_MICROARCH_LEVEL(X86_64_BASELINE,"x86-64", 95) -X86_MICROARCH_LEVEL(X86_64_V2, "x86-64-v2", 96) -X86_MICROARCH_LEVEL(X86_64_V3, "x86-64-v3", 97) -X86_MICROARCH_LEVEL(X86_64_V4, "x86-64-v4", 98) -X86_MICROARCH_LEVEL(APXF, "apxf", 111) +// Max number of priorities. Priorities form a consecutive range +#define MAX_PRIORITY 35 + #undef X86_FEATURE_COMPAT #undef X86_FEATURE #undef X86_MICROARCH_LEVEL diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.h b/llvm/include/llvm/TargetParser/X86TargetParser.h index 80f3d35da9a9b..46061f9d1fc7d 100644 --- a/llvm/include/llvm/TargetParser/X86TargetParser.h +++ b/llvm/include/llvm/TargetParser/X86TargetParser.h @@ -60,8 +60,7 @@ enum ProcessorFeatures { #define X86_FEATURE(ENUM, STRING) FEATURE_##ENUM, #include "llvm/TargetParser/X86TargetParser.def" CPU_FEATURE_MAX, - -#define X86_MICROARCH_LEVEL(ENUM, STRING, PRIORITY) FEATURE_##ENUM = PRIORITY, +#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY, ABI_VALUE) FEATURE_##ENUM, #include "llvm/TargetParser/X86TargetParser.def" }; diff --git a/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h b/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h index 8e7df5e6b10f0..e4bfcd395c2d6 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h +++ b/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h @@ -11,6 +11,7 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/Compiler.h" +#include "llvm/TargetParser/Triple.h" #include namespace llvm { @@ -23,10 +24,12 @@ class BoundsCheckingPass : public PassInfoMixin { public: struct Options { struct Runtime { - Runtime(bool MinRuntime, bool MayReturn) - : 
MinRuntime(MinRuntime), MayReturn(MayReturn) {} + Runtime(bool MinRuntime, bool MayReturn, bool HandlerPreserveAllRegs) + : MinRuntime(MinRuntime), MayReturn(MayReturn), + HandlerPreserveAllRegs(HandlerPreserveAllRegs) {} bool MinRuntime; bool MayReturn; + bool HandlerPreserveAllRegs; }; std::optional Rt; // Trap if empty. bool Merge = false; diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp index 4064b25d9d4e7..0c3b02ae09f47 100644 --- a/llvm/lib/Analysis/Delinearization.cpp +++ b/llvm/lib/Analysis/Delinearization.cpp @@ -656,6 +656,108 @@ bool llvm::delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr, return !Subscripts.empty(); } +static bool isKnownNonNegative(ScalarEvolution *SE, const SCEV *S, + const Value *Ptr) { + bool Inbounds = false; + if (auto *SrcGEP = dyn_cast(Ptr)) + Inbounds = SrcGEP->isInBounds(); + if (Inbounds) { + if (const SCEVAddRecExpr *AddRec = dyn_cast(S)) { + if (AddRec->isAffine()) { + // We know S is for Ptr, the operand on a load/store, so doesn't wrap. + // If both parts are NonNegative, the end result will be NonNegative + if (SE->isKnownNonNegative(AddRec->getStart()) && + SE->isKnownNonNegative(AddRec->getOperand(1))) + return true; + } + } + } + + return SE->isKnownNonNegative(S); +} + +/// Compare to see if S is less than Size, using +/// +/// isKnownNegative(S - Size) +/// +/// with some extra checking if S is an AddRec and we can prove less-than using +/// the loop bounds. +static bool isKnownLessThan(ScalarEvolution *SE, const SCEV *S, + const SCEV *Size) { + // First unify to the same type + auto *SType = dyn_cast(S->getType()); + auto *SizeType = dyn_cast(Size->getType()); + if (!SType || !SizeType) + return false; + Type *MaxType = + (SType->getBitWidth() >= SizeType->getBitWidth()) ? 
SType : SizeType; + S = SE->getTruncateOrZeroExtend(S, MaxType); + Size = SE->getTruncateOrZeroExtend(Size, MaxType); + + auto CollectUpperBound = [&](const Loop *L, Type *T) -> const SCEV * { + if (SE->hasLoopInvariantBackedgeTakenCount(L)) { + const SCEV *UB = SE->getBackedgeTakenCount(L); + return SE->getTruncateOrZeroExtend(UB, T); + } + return nullptr; + }; + + auto CheckAddRecBECount = [&]() { + const SCEVAddRecExpr *AddRec = dyn_cast(S); + if (!AddRec || !AddRec->isAffine() || !AddRec->hasNoSignedWrap()) + return false; + const SCEV *BECount = CollectUpperBound(AddRec->getLoop(), MaxType); + // If the BTC cannot be computed, check the base case for S. + if (!BECount || isa(BECount)) + return false; + const SCEV *Start = AddRec->getStart(); + const SCEV *Step = AddRec->getStepRecurrence(*SE); + const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE); + const SCEV *Diff0 = SE->getMinusSCEV(Start, Size); + const SCEV *Diff1 = SE->getMinusSCEV(End, Size); + + // If the value of Step is non-negative and the AddRec is non-wrap, it + // reaches its maximum at the last iteration. So it's enough to check + // whether End - Size is negative. + if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1)) + return true; + + // If the value of Step is non-positive and the AddRec is non-wrap, the + // initial value is its maximum. + if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0)) + return true; + + // Even if we don't know the sign of Step, either Start or End must be + // the maximum value of the AddRec since it is non-wrap. 
+ if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1)) + return true; + + return false; + }; + + if (CheckAddRecBECount()) + return true; + + // Check using normal isKnownNegative + const SCEV *LimitedBound = SE->getMinusSCEV(S, Size); + return SE->isKnownNegative(LimitedBound); +} + +bool llvm::validateDelinearizationResult(ScalarEvolution &SE, + ArrayRef Sizes, + ArrayRef Subscripts, + const Value *Ptr) { + for (size_t I = 1; I < Sizes.size(); ++I) { + const SCEV *Size = Sizes[I - 1]; + const SCEV *Subscript = Subscripts[I]; + if (!isKnownNonNegative(&SE, Subscript, Ptr)) + return false; + if (!isKnownLessThan(&SE, Subscript, Size)) + return false; + } + return true; +} + bool llvm::getIndexExpressionsFromGEP(ScalarEvolution &SE, const GetElementPtrInst *GEP, SmallVectorImpl &Subscripts, @@ -704,44 +806,6 @@ bool llvm::getIndexExpressionsFromGEP(ScalarEvolution &SE, return !Subscripts.empty(); } -bool llvm::tryDelinearizeFixedSizeImpl( - ScalarEvolution *SE, Instruction *Inst, const SCEV *AccessFn, - SmallVectorImpl &Subscripts, SmallVectorImpl &Sizes) { - Value *SrcPtr = getLoadStorePointerOperand(Inst); - - // Check the simple case where the array dimensions are fixed size. - auto *SrcGEP = dyn_cast(SrcPtr); - if (!SrcGEP) - return false; - - getIndexExpressionsFromGEP(*SE, SrcGEP, Subscripts, Sizes); - - // Check that the two size arrays are non-empty and equal in length and - // value. - // TODO: it would be better to let the caller to clear Subscripts, similar - // to how we handle Sizes. - if (Sizes.empty() || Subscripts.size() <= 1) { - Subscripts.clear(); - return false; - } - - // Check that for identical base pointers we do not miss index offsets - // that have been added before this GEP is applied. 
- Value *SrcBasePtr = SrcGEP->getOperand(0)->stripPointerCasts(); - const SCEVUnknown *SrcBase = - dyn_cast(SE->getPointerBase(AccessFn)); - if (!SrcBase || SrcBasePtr != SrcBase->getValue()) { - Subscripts.clear(); - return false; - } - - assert(Subscripts.size() == Sizes.size() + 1 && - "Expected equal number of entries in the list of size and " - "subscript."); - - return true; -} - namespace { void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI, @@ -804,6 +868,11 @@ void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI, for (int i = 0; i < Size; i++) O << "[" << *Subscripts[i] << "]"; O << "\n"; + + bool IsValid = validateDelinearizationResult( + *SE, Sizes, Subscripts, getLoadStorePointerOperand(&Inst)); + O << "Delinearization validation: " << (IsValid ? "Succeeded" : "Failed") + << "\n"; } } diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp index b0398fe7e93b3..253f4d1441098 100644 --- a/llvm/lib/Analysis/DependenceAnalysis.cpp +++ b/llvm/lib/Analysis/DependenceAnalysis.cpp @@ -441,11 +441,6 @@ static void dumpExampleDependence(raw_ostream &OS, DependenceInfo *DA, } } } - SCEVUnionPredicate Assumptions = DA->getRuntimeAssumptions(); - if (!Assumptions.isAlwaysTrue()) { - OS << "Runtime Assumptions:\n"; - Assumptions.print(OS, 0); - } } void DependenceAnalysisWrapperPass::print(raw_ostream &OS, @@ -1176,83 +1171,6 @@ bool DependenceInfo::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X, } } -/// Compare to see if S is less than Size, using -/// -/// isKnownNegative(S - Size) -/// -/// with some extra checking if S is an AddRec and we can prove less-than using -/// the loop bounds. 
-bool DependenceInfo::isKnownLessThan(const SCEV *S, const SCEV *Size) const { - // First unify to the same type - auto *SType = dyn_cast(S->getType()); - auto *SizeType = dyn_cast(Size->getType()); - if (!SType || !SizeType) - return false; - Type *MaxType = - (SType->getBitWidth() >= SizeType->getBitWidth()) ? SType : SizeType; - S = SE->getTruncateOrZeroExtend(S, MaxType); - Size = SE->getTruncateOrZeroExtend(Size, MaxType); - - auto CheckAddRecBECount = [&]() { - const SCEVAddRecExpr *AddRec = dyn_cast(S); - if (!AddRec || !AddRec->isAffine() || !AddRec->hasNoSignedWrap()) - return false; - const SCEV *BECount = collectUpperBound(AddRec->getLoop(), MaxType); - // If the BTC cannot be computed, check the base case for S. - if (!BECount || isa(BECount)) - return false; - const SCEV *Start = AddRec->getStart(); - const SCEV *Step = AddRec->getStepRecurrence(*SE); - const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE); - const SCEV *Diff0 = SE->getMinusSCEV(Start, Size); - const SCEV *Diff1 = SE->getMinusSCEV(End, Size); - - // If the value of Step is non-negative and the AddRec is non-wrap, it - // reaches its maximum at the last iteration. So it's enouth to check - // whether End - Size is negative. - if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1)) - return true; - - // If the value of Step is non-positive and the AddRec is non-wrap, the - // initial value is its maximum. - if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0)) - return true; - - // Even if we don't know the sign of Step, either Start or End must be - // the maximum value of the AddRec since it is non-wrap. 
- if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1)) - return true; - - return false; - }; - - if (CheckAddRecBECount()) - return true; - - // Check using normal isKnownNegative - const SCEV *LimitedBound = SE->getMinusSCEV(S, Size); - return SE->isKnownNegative(LimitedBound); -} - -bool DependenceInfo::isKnownNonNegative(const SCEV *S, const Value *Ptr) const { - bool Inbounds = false; - if (auto *SrcGEP = dyn_cast(Ptr)) - Inbounds = SrcGEP->isInBounds(); - if (Inbounds) { - if (const SCEVAddRecExpr *AddRec = dyn_cast(S)) { - if (AddRec->isAffine()) { - // We know S is for Ptr, the operand on a load/store, so doesn't wrap. - // If both parts are NonNegative, the end result will be NonNegative - if (SE->isKnownNonNegative(AddRec->getStart()) && - SE->isKnownNonNegative(AddRec->getOperand(1))) - return true; - } - } - } - - return SE->isKnownNonNegative(S); -} - // All subscripts are all the same type. // Loop bound may be smaller (e.g., a char). // Should zero extend loop bound, since it's always >= 0. @@ -3360,35 +3278,8 @@ bool DependenceInfo::tryDelinearizeFixedSize( // iff the subscripts are positive and are less than the range of the // dimension. 
if (!DisableDelinearizationChecks) { - auto AllIndicesInRange = [&](ArrayRef DimensionSizes, - SmallVectorImpl &Subscripts, - Value *Ptr) { - size_t SSize = Subscripts.size(); - for (size_t I = 1; I < SSize; ++I) { - const SCEV *S = Subscripts[I]; - if (!isKnownNonNegative(S, Ptr)) { - LLVM_DEBUG({ - dbgs() << "Check failed: !isKnownNonNegative(S, Ptr)\n"; - dbgs() << " S: " << *S << "\n" << " Ptr: " << *Ptr << "\n"; - }); - return false; - } - const SCEV *Range = DimensionSizes[I - 1]; - if (!isKnownLessThan(S, Range)) { - LLVM_DEBUG({ - dbgs() << "Check failed: !isKnownLessThan(S, Range)\n"; - dbgs() << " S: " << *S << "\n" - << " Range: " << *Range << "\n"; - }); - return false; - } - } - return true; - }; - - if (!AllIndicesInRange(SrcSizes, SrcSubscripts, SrcPtr) || - !AllIndicesInRange(DstSizes, DstSubscripts, DstPtr)) { - LLVM_DEBUG(dbgs() << "Check failed: AllIndicesInRange.\n"); + if (!validateDelinearizationResult(*SE, SrcSizes, SrcSubscripts, SrcPtr) || + !validateDelinearizationResult(*SE, DstSizes, DstSubscripts, DstPtr)) { SrcSubscripts.clear(); DstSubscripts.clear(); return false; @@ -3446,8 +3337,6 @@ bool DependenceInfo::tryDelinearizeParametricSize( SrcSubscripts.size() != DstSubscripts.size()) return false; - size_t Size = SrcSubscripts.size(); - // Statically check that the array bounds are in-range. The first subscript we // don't have a size for and it cannot overflow into another subscript, so is // always safe. The others need to be 0 <= subscript[i] < bound, for both src @@ -3455,29 +3344,9 @@ bool DependenceInfo::tryDelinearizeParametricSize( // FIXME: It may be better to record these sizes and add them as constraints // to the dependency checks. 
if (!DisableDelinearizationChecks) - for (size_t I = 1; I < Size; ++I) { - bool SNN = isKnownNonNegative(SrcSubscripts[I], SrcPtr); - bool DNN = isKnownNonNegative(DstSubscripts[I], DstPtr); - bool SLT = isKnownLessThan(SrcSubscripts[I], Sizes[I - 1]); - bool DLT = isKnownLessThan(DstSubscripts[I], Sizes[I - 1]); - if (SNN && DNN && SLT && DLT) - continue; - - LLVM_DEBUG({ - dbgs() << "Delinearization checks failed: can't prove the following\n"; - if (!SNN) - dbgs() << " isKnownNonNegative(" << *SrcSubscripts[I] << ")\n"; - if (!DNN) - dbgs() << " isKnownNonNegative(" << *DstSubscripts[I] << ")\n"; - if (!SLT) - dbgs() << " isKnownLessThan(" << *SrcSubscripts[I] << ", " - << *Sizes[I - 1] << ")\n"; - if (!DLT) - dbgs() << " isKnownLessThan(" << *DstSubscripts[I] << ", " - << *Sizes[I - 1] << ")\n"; - }); + if (!validateDelinearizationResult(*SE, Sizes, SrcSubscripts, SrcPtr) || + !validateDelinearizationResult(*SE, Sizes, DstSubscripts, DstPtr)) return false; - } return true; } @@ -3510,10 +3379,6 @@ bool DependenceInfo::invalidate(Function &F, const PreservedAnalyses &PA, Inv.invalidate(F, PA); } -SCEVUnionPredicate DependenceInfo::getRuntimeAssumptions() const { - return SCEVUnionPredicate(Assumptions, *SE); -} - // depends - // Returns NULL if there is no dependence. // Otherwise, return a Dependence with as many details as possible. @@ -3614,20 +3479,10 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, SCEVUnionPredicate(Assume, *SE)); } - if (!Assume.empty()) { - if (!UnderRuntimeAssumptions) - return std::make_unique(Src, Dst, - SCEVUnionPredicate(Assume, *SE)); - // Add non-redundant assumptions. - unsigned N = Assumptions.size(); - for (const SCEVPredicate *P : Assume) { - bool Implied = false; - for (unsigned I = 0; I != N && !Implied; I++) - if (Assumptions[I]->implies(P, *SE)) - Implied = true; - if (!Implied) - Assumptions.push_back(P); - } + if (!Assume.empty() && !UnderRuntimeAssumptions) { + // Runtime assumptions needed but not allowed. 
+ return std::make_unique(Src, Dst, + SCEVUnionPredicate(Assume, *SE)); } unsigned Pairs = 1; diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp index 4d21f1c7e2de2..7624e0ed6f2b0 100644 --- a/llvm/lib/Analysis/IVDescriptors.cpp +++ b/llvm/lib/Analysis/IVDescriptors.cpp @@ -216,6 +216,52 @@ static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst, return true; } +/// Returns true if \p Phi is a min/max reduction matching \p Kind where \p Phi +/// is used outside the reduction chain. This is common for loops selecting the +/// index of a minimum/maximum value (argmin/argmax). +static bool isMinMaxReductionPhiWithUsersOutsideReductionChain( + PHINode *Phi, RecurKind Kind, Loop *TheLoop, RecurrenceDescriptor &RedDes) { + BasicBlock *Latch = TheLoop->getLoopLatch(); + if (!Latch) + return false; + + assert(Phi->getNumIncomingValues() == 2 && "phi must have 2 incoming values"); + Value *Inc = Phi->getIncomingValueForBlock(Latch); + if (Phi->hasOneUse() || !Inc->hasOneUse() || + !RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind)) + return false; + + Value *A, *B; + bool IsMinMax = [&]() { + switch (Kind) { + case RecurKind::UMax: + return match(Inc, m_UMax(m_Value(A), m_Value(B))); + case RecurKind::UMin: + return match(Inc, m_UMin(m_Value(A), m_Value(B))); + case RecurKind::SMax: + return match(Inc, m_SMax(m_Value(A), m_Value(B))); + case RecurKind::SMin: + return match(Inc, m_SMin(m_Value(A), m_Value(B))); + default: + llvm_unreachable("all min/max kinds must be handled"); + } + }(); + if (!IsMinMax) + return false; + + if (A == B || (A != Phi && B != Phi)) + return false; + + SmallPtrSet CastInsts; + Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader()); + RedDes = + RecurrenceDescriptor(RdxStart, /*Exit=*/nullptr, /*Store=*/nullptr, Kind, + FastMathFlags(), /*ExactFP=*/nullptr, Phi->getType(), + /*Signed=*/false, /*Ordered=*/false, CastInsts, + /*MinWidthCastToRecurTy=*/-1U, 
/*PhiMultiUse=*/true); + return true; +} + bool RecurrenceDescriptor::AddReductionVar( PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF, RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC, @@ -227,6 +273,11 @@ bool RecurrenceDescriptor::AddReductionVar( if (Phi->getParent() != TheLoop->getHeader()) return false; + // Check for min/max reduction variables that feed other users in the loop. + if (isMinMaxReductionPhiWithUsersOutsideReductionChain(Phi, Kind, TheLoop, + RedDes)) + return true; + // Obtain the reduction start value from the value that comes from the loop // preheader. Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader()); diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp index e0e2be8e35929..3bba2e8c0d8ad 100644 --- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp +++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp @@ -368,8 +368,16 @@ bool IndexedReference::tryDelinearizeFixedSize( // the load/store instruction being analyzed. It is not needed for further // analysis. // TODO: Maybe this property should be enforced in delinearizeFixedSizeArray. 
+#ifndef NDEBUG assert(!Sizes.empty() && Subscripts.size() == Sizes.size() && - Sizes.back() == ElementSize && "Unexpected delinearization result"); + "Inconsistent length of Sizes and Subscripts"); + Type *WideTy = + SE.getWiderType(ElementSize->getType(), Sizes.back()->getType()); + const SCEV *ElemSizeExt = SE.getNoopOrZeroExtend(ElementSize, WideTy); + const SCEV *LastSizeExt = SE.getNoopOrZeroExtend(Sizes.back(), WideTy); + assert(ElemSizeExt == LastSizeExt && "Unexpected last element of Sizes"); +#endif + Sizes.pop_back(); return true; } diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 9768202a9ba26..c529d87502acd 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -1190,42 +1190,6 @@ InstructionCost TargetTransformInfo::getMemoryOpCost( return Cost; } -InstructionCost TargetTransformInfo::getMaskedMemoryOpCost( - const MemIntrinsicCostAttributes &MICA, - TTI::TargetCostKind CostKind) const { - InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(MICA, CostKind); - assert(Cost >= 0 && "TTI should not produce negative costs!"); - return Cost; -} - -InstructionCost TargetTransformInfo::getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { - InstructionCost Cost = TTIImpl->getGatherScatterOpCost( - Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); - assert((!Cost.isValid() || Cost >= 0) && - "TTI should not produce negative costs!"); - return Cost; -} - -InstructionCost TargetTransformInfo::getExpandCompressMemoryOpCost( - unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind, const Instruction *I) const { - InstructionCost Cost = TTIImpl->getExpandCompressMemoryOpCost( - Opcode, DataTy, VariableMask, Alignment, CostKind, I); - assert(Cost >= 0 && "TTI should not produce negative 
costs!"); - return Cost; -} - -InstructionCost TargetTransformInfo::getStridedMemoryOpCost( - unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { - InstructionCost Cost = TTIImpl->getStridedMemoryOpCost( - Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); - assert(Cost >= 0 && "TTI should not produce negative costs!"); - return Cost; -} - InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, @@ -1245,6 +1209,14 @@ TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, return Cost; } +InstructionCost TargetTransformInfo::getMemIntrinsicInstrCost( + const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const { + InstructionCost Cost = TTIImpl->getMemIntrinsicInstrCost(MICA, CostKind); + assert(Cost >= 0 && "TTI should not produce negative costs!"); + return Cost; +} + InstructionCost TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy, ArrayRef Tys, diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 61d5c2c81df2e..c3678d37607d5 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -4250,11 +4250,13 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { } case lltok::kw_ptrauth: { // ValID ::= 'ptrauth' '(' ptr @foo ',' i32 - // (',' i64 (',' ptr addrdisc)? )? ')' + // (',' i64 (',' ptr addrdisc (',' ptr ds)? + // )? )? 
')' Lex.Lex(); Constant *Ptr, *Key; - Constant *Disc = nullptr, *AddrDisc = nullptr; + Constant *Disc = nullptr, *AddrDisc = nullptr, + *DeactivationSymbol = nullptr; if (parseToken(lltok::lparen, "expected '(' in constant ptrauth expression") || @@ -4263,11 +4265,14 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { "expected comma in constant ptrauth expression") || parseGlobalTypeAndValue(Key)) return true; - // If present, parse the optional disc/addrdisc. - if (EatIfPresent(lltok::comma)) - if (parseGlobalTypeAndValue(Disc) || - (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(AddrDisc))) - return true; + // If present, parse the optional disc/addrdisc/ds. + if (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(Disc)) + return true; + if (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(AddrDisc)) + return true; + if (EatIfPresent(lltok::comma) && + parseGlobalTypeAndValue(DeactivationSymbol)) + return true; if (parseToken(lltok::rparen, "expected ')' in constant ptrauth expression")) return true; @@ -4298,7 +4303,15 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { AddrDisc = ConstantPointerNull::get(PointerType::get(Context, 0)); } - ID.ConstantVal = ConstantPtrAuth::get(Ptr, KeyC, DiscC, AddrDisc); + if (!DeactivationSymbol) + DeactivationSymbol = + ConstantPointerNull::get(PointerType::get(Context, 0)); + if (!DeactivationSymbol->getType()->isPointerTy()) + return error(ID.Loc, + "constant ptrauth deactivation symbol must be a pointer"); + + ID.ConstantVal = + ConstantPtrAuth::get(Ptr, KeyC, DiscC, AddrDisc, DeactivationSymbol); ID.Kind = ValID::t_Constant; return false; } diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 9f3bb230440fb..04cb0a699ebbf 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -1609,7 +1609,16 @@ Expected BitcodeReader::materializeValue(unsigned 
StartValID, if (!Disc) return error("ptrauth disc operand must be ConstantInt"); - C = ConstantPtrAuth::get(ConstOps[0], Key, Disc, ConstOps[3]); + Constant *DeactivationSymbol = + ConstOps.size() > 4 ? ConstOps[4] + : ConstantPointerNull::get(cast( + ConstOps[3]->getType())); + if (!DeactivationSymbol->getType()->isPointerTy()) + return error( + "ptrauth deactivation symbol operand must be a pointer"); + + C = ConstantPtrAuth::get(ConstOps[0], Key, Disc, ConstOps[3], + DeactivationSymbol); break; } case BitcodeConstant::NoCFIOpcode: { @@ -3813,6 +3822,16 @@ Error BitcodeReader::parseConstants() { (unsigned)Record[2], (unsigned)Record[3]}); break; } + case bitc::CST_CODE_PTRAUTH2: { + if (Record.size() < 5) + return error("Invalid ptrauth record"); + // Ptr, Key, Disc, AddrDisc, DeactivationSymbol + V = BitcodeConstant::create( + Alloc, CurTy, BitcodeConstant::ConstantPtrAuthOpcode, + {(unsigned)Record[0], (unsigned)Record[1], (unsigned)Record[2], + (unsigned)Record[3], (unsigned)Record[4]}); + break; + } } assert(V->getType() == getTypeByID(CurTyID) && "Incorrect result type ID"); diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 1d0461478b90c..0dd3fa3361fee 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -3030,11 +3030,12 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal, Record.push_back(VE.getTypeID(NC->getGlobalValue()->getType())); Record.push_back(VE.getValueID(NC->getGlobalValue())); } else if (const auto *CPA = dyn_cast(C)) { - Code = bitc::CST_CODE_PTRAUTH; + Code = bitc::CST_CODE_PTRAUTH2; Record.push_back(VE.getValueID(CPA->getPointer())); Record.push_back(VE.getValueID(CPA->getKey())); Record.push_back(VE.getValueID(CPA->getDiscriminator())); Record.push_back(VE.getValueID(CPA->getAddrDiscriminator())); + Record.push_back(VE.getValueID(CPA->getDeactivationSymbol())); } else { #ifndef NDEBUG C->dump(); diff --git 
a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 751d3735d3b2b..2e4a26ef70bc2 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -493,10 +493,12 @@ void DwarfCompileUnit::attachLowHighPC(DIE &D, const MCSymbol *Begin, assert(End->isDefined() && "Invalid end label"); addLabelAddress(D, dwarf::DW_AT_low_pc, Begin); - if (DD->getDwarfVersion() < 4) - addLabelAddress(D, dwarf::DW_AT_high_pc, End); - else + if (DD->getDwarfVersion() >= 4 && + (!isDwoUnit() || !llvm::isRangeRelaxable(Begin, End))) { addLabelDelta(D, dwarf::DW_AT_high_pc, End, Begin); + return; + } + addLabelAddress(D, dwarf::DW_AT_high_pc, End); } // Add info for Wasm-global-based relocation. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index a50bde1c37cbb..40bfea059c707 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -3307,14 +3307,12 @@ static MCSymbol *emitLoclistsTableHeader(AsmPrinter *Asm, } template -static void emitRangeList( - DwarfDebug &DD, AsmPrinter *Asm, MCSymbol *Sym, const Ranges &R, - const DwarfCompileUnit &CU, unsigned BaseAddressx, unsigned OffsetPair, - unsigned StartxLength, unsigned EndOfList, - StringRef (*StringifyEnum)(unsigned), - bool ShouldUseBaseAddress, - PayloadEmitter EmitPayload) { - +static void +emitRangeList(DwarfDebug &DD, AsmPrinter *Asm, MCSymbol *Sym, const Ranges &R, + const DwarfCompileUnit &CU, unsigned BaseAddressx, + unsigned OffsetPair, unsigned StartxLength, unsigned StartxEndx, + unsigned EndOfList, StringRef (*StringifyEnum)(unsigned), + bool ShouldUseBaseAddress, PayloadEmitter EmitPayload) { auto Size = Asm->MAI->getCodePointerSize(); bool UseDwarf5 = DD.getDwarfVersion() >= 5; @@ -3333,7 +3331,8 @@ static void emitRangeList( bool BaseIsSet = false; for (const auto &P : SectionRanges) { auto *Base = CUBase; - if 
((Asm->TM.getTargetTriple().isNVPTX() && DD.tuneForGDB())) { + if ((Asm->TM.getTargetTriple().isNVPTX() && DD.tuneForGDB()) || + (DD.useSplitDwarf() && UseDwarf5 && P.first->isLinkerRelaxable())) { // PTX does not support subtracting labels from the code section in the // debug_loc section. To work around this, the NVPTX backend needs the // compile unit to have no low_pc in order to have a zero base_address @@ -3389,12 +3388,27 @@ static void emitRangeList( Asm->emitLabelDifference(End, Base, Size); } } else if (UseDwarf5) { - Asm->OutStreamer->AddComment(StringifyEnum(StartxLength)); - Asm->emitInt8(StartxLength); - Asm->OutStreamer->AddComment(" start index"); - Asm->emitULEB128(DD.getAddressPool().getIndex(Begin)); - Asm->OutStreamer->AddComment(" length"); - Asm->emitLabelDifferenceAsULEB128(End, Begin); + // NOTE: We can't use absoluteSymbolDiff here instead of + // isRangeRelaxable. While isRangeRelaxable only checks that the offset + // between labels won't change at link time (which is exactly what we + // need), absoluteSymbolDiff also requires that the offset remain + // unchanged at assembly time, imposing a much stricter condition. + // Consequently, this would lead to less optimal debug info emission. 
+ if (DD.useSplitDwarf() && llvm::isRangeRelaxable(Begin, End)) { + Asm->OutStreamer->AddComment(StringifyEnum(StartxEndx)); + Asm->emitInt8(StartxEndx); + Asm->OutStreamer->AddComment(" start index"); + Asm->emitULEB128(DD.getAddressPool().getIndex(Begin)); + Asm->OutStreamer->AddComment(" end index"); + Asm->emitULEB128(DD.getAddressPool().getIndex(End)); + } else { + Asm->OutStreamer->AddComment(StringifyEnum(StartxLength)); + Asm->emitInt8(StartxLength); + Asm->OutStreamer->AddComment(" start index"); + Asm->emitULEB128(DD.getAddressPool().getIndex(Begin)); + Asm->OutStreamer->AddComment(" length"); + Asm->emitLabelDifferenceAsULEB128(End, Begin); + } } else { Asm->OutStreamer->emitSymbolValue(Begin, Size); Asm->OutStreamer->emitSymbolValue(End, Size); @@ -3415,14 +3429,14 @@ static void emitRangeList( // Handles emission of both debug_loclist / debug_loclist.dwo static void emitLocList(DwarfDebug &DD, AsmPrinter *Asm, const DebugLocStream::List &List) { - emitRangeList(DD, Asm, List.Label, DD.getDebugLocs().getEntries(List), - *List.CU, dwarf::DW_LLE_base_addressx, - dwarf::DW_LLE_offset_pair, dwarf::DW_LLE_startx_length, - dwarf::DW_LLE_end_of_list, llvm::dwarf::LocListEncodingString, - /* ShouldUseBaseAddress */ true, - [&](const DebugLocStream::Entry &E) { - DD.emitDebugLocEntryLocation(E, List.CU); - }); + emitRangeList( + DD, Asm, List.Label, DD.getDebugLocs().getEntries(List), *List.CU, + dwarf::DW_LLE_base_addressx, dwarf::DW_LLE_offset_pair, + dwarf::DW_LLE_startx_length, dwarf::DW_LLE_startx_endx, + dwarf::DW_LLE_end_of_list, llvm::dwarf::LocListEncodingString, + /* ShouldUseBaseAddress */ true, [&](const DebugLocStream::Entry &E) { + DD.emitDebugLocEntryLocation(E, List.CU); + }); } void DwarfDebug::emitDebugLocImpl(MCSection *Sec) { @@ -3644,8 +3658,8 @@ static void emitRangeList(DwarfDebug &DD, AsmPrinter *Asm, const RangeSpanList &List) { emitRangeList(DD, Asm, List.Label, List.Ranges, *List.CU, dwarf::DW_RLE_base_addressx, 
dwarf::DW_RLE_offset_pair, - dwarf::DW_RLE_startx_length, dwarf::DW_RLE_end_of_list, - llvm::dwarf::RangeListEncodingString, + dwarf::DW_RLE_startx_length, dwarf::DW_RLE_startx_endx, + dwarf::DW_RLE_end_of_list, llvm::dwarf::RangeListEncodingString, List.CU->getCUNode()->getRangesBaseAddress() || DD.getDwarfVersion() >= 5, [](auto) {}); diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp index 7be7468300569..e2ed45eec0ecd 100644 --- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -196,6 +196,10 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type"); } + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + Info.DeactivationSymbol = cast(Bundle->Inputs[0]); + } + Info.CB = &CB; Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); Info.CallConv = CallConv; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 2ec138b6e186d..e0665d99a891d 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2917,6 +2917,9 @@ bool IRTranslator::translateIntrinsic( } } + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) + MIB->setDeactivationSymbol(*MF, Bundle->Inputs[0].get()); + return true; } diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 637acd61c8a5f..3906b311addf0 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -38,8 +38,10 @@ void MachineIRBuilder::setMF(MachineFunction &MF) { //------------------------------------------------------------------------------ MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { - return BuildMI(getMF(), 
{getDL(), getPCSections(), getMMRAMetadata()}, - getTII().get(Opcode)); + return BuildMI( + getMF(), + {getDL(), getPCSections(), getMMRAMetadata(), getDeactivationSymbol()}, + getTII().get(Opcode)); } MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp index 8b72c295416a2..dbd56c7414f38 100644 --- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp +++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp @@ -281,6 +281,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) { .Case("heap-alloc-marker", MIToken::kw_heap_alloc_marker) .Case("pcsections", MIToken::kw_pcsections) .Case("cfi-type", MIToken::kw_cfi_type) + .Case("deactivation-symbol", MIToken::kw_deactivation_symbol) .Case("bbsections", MIToken::kw_bbsections) .Case("bb_id", MIToken::kw_bb_id) .Case("unknown-size", MIToken::kw_unknown_size) diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h index 0627f176b9e00..0407a0e7540d7 100644 --- a/llvm/lib/CodeGen/MIRParser/MILexer.h +++ b/llvm/lib/CodeGen/MIRParser/MILexer.h @@ -136,6 +136,7 @@ struct MIToken { kw_heap_alloc_marker, kw_pcsections, kw_cfi_type, + kw_deactivation_symbol, kw_bbsections, kw_bb_id, kw_unknown_size, diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 434a579c3be3f..f35274d4e2edf 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -1072,6 +1072,7 @@ bool MIParser::parse(MachineInstr *&MI) { Token.isNot(MIToken::kw_heap_alloc_marker) && Token.isNot(MIToken::kw_pcsections) && Token.isNot(MIToken::kw_cfi_type) && + Token.isNot(MIToken::kw_deactivation_symbol) && Token.isNot(MIToken::kw_debug_location) && Token.isNot(MIToken::kw_debug_instr_number) && Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) { @@ -1120,6 +1121,14 @@ bool MIParser::parse(MachineInstr *&MI) { lex(); } + 
GlobalValue *DS = nullptr; + if (Token.is(MIToken::kw_deactivation_symbol)) { + lex(); + if (parseGlobalValue(DS)) + return true; + lex(); + } + unsigned InstrNum = 0; if (Token.is(MIToken::kw_debug_instr_number)) { lex(); @@ -1196,6 +1205,8 @@ bool MIParser::parse(MachineInstr *&MI) { MI->setPCSections(MF, PCSections); if (CFIType) MI->setCFIType(MF, CFIType); + if (DS) + MI->setDeactivationSymbol(MF, DS); if (!MemOperands.empty()) MI->setMemRefs(MF, MemOperands); if (InstrNum) diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp index 1d54d72336860..c0554497653f8 100644 --- a/llvm/lib/CodeGen/MIRPrinter.cpp +++ b/llvm/lib/CodeGen/MIRPrinter.cpp @@ -19,6 +19,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MIRFormatter.h" #include "llvm/CodeGen/MIRYamlMapping.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineConstantPool.h" @@ -895,6 +896,10 @@ static void printMI(raw_ostream &OS, MFPrintState &State, } if (uint32_t CFIType = MI.getCFIType()) OS << LS << "cfi-type " << CFIType; + if (Value *DS = MI.getDeactivationSymbol()) { + OS << LS << "deactivation-symbol "; + MIRFormatter::printIRValue(OS, *DS, State.MST); + } if (auto Num = MI.peekDebugInstrNum()) OS << LS << "debug-instr-number " << Num; diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp index bfa5ab274c686..634547ded992f 100644 --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -609,10 +609,10 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo( ArrayRef MMOs, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs) { + uint32_t CFIType, MDNode *MMRAs, Value *DS) { return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol, 
PostInstrSymbol, HeapAllocMarker, - PCSections, CFIType, MMRAs); + PCSections, CFIType, MMRAs, DS); } const char *MachineFunction::createExternalSymbolName(StringRef Name) { diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index eb46124d9eb5f..18111156efa4f 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -322,15 +322,17 @@ void MachineInstr::setExtraInfo(MachineFunction &MF, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs) { + uint32_t CFIType, MDNode *MMRAs, Value *DS) { bool HasPreInstrSymbol = PreInstrSymbol != nullptr; bool HasPostInstrSymbol = PostInstrSymbol != nullptr; bool HasHeapAllocMarker = HeapAllocMarker != nullptr; bool HasPCSections = PCSections != nullptr; bool HasCFIType = CFIType != 0; bool HasMMRAs = MMRAs != nullptr; + bool HasDS = DS != nullptr; int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol + - HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs; + HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs + + HasDS; // Drop all extra info if there is none. if (NumPointers <= 0) { @@ -343,10 +345,10 @@ void MachineInstr::setExtraInfo(MachineFunction &MF, // 32-bit pointers. // FIXME: Maybe we should make the symbols in the extra info mutable? 
else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections || - HasCFIType) { + HasCFIType || HasDS) { Info.set( MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol, - HeapAllocMarker, PCSections, CFIType, MMRAs)); + HeapAllocMarker, PCSections, CFIType, MMRAs, DS)); return; } @@ -365,7 +367,7 @@ void MachineInstr::dropMemRefs(MachineFunction &MF) { setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setMemRefs(MachineFunction &MF, @@ -377,7 +379,7 @@ void MachineInstr::setMemRefs(MachineFunction &MF, setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::addMemOperand(MachineFunction &MF, @@ -488,7 +490,7 @@ void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { @@ -504,7 +506,7 @@ void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol, getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) { @@ -513,7 +515,8 @@ void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - Marker, getPCSections(), getCFIType(), getMMRAMetadata()); + Marker, getPCSections(), getCFIType(), getMMRAMetadata(), + getDeactivationSymbol()); } void 
MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) { @@ -523,7 +526,7 @@ void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) { setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), PCSections, getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) { @@ -532,7 +535,8 @@ void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata()); + getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata(), + getDeactivationSymbol()); } void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) { @@ -541,7 +545,18 @@ void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs); + getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs, + getDeactivationSymbol()); +} + +void MachineInstr::setDeactivationSymbol(MachineFunction &MF, Value *DS) { + // Do nothing if old and new symbols are the same. + if (DS == getDeactivationSymbol()) + return; + + setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), + getHeapAllocMarker(), getPCSections(), getCFIType(), + getMMRAMetadata(), DS); } void MachineInstr::cloneInstrSymbols(MachineFunction &MF, @@ -730,6 +745,8 @@ bool MachineInstr::isIdenticalTo(const MachineInstr &Other, // Call instructions with different CFI types are not identical. 
if (isCall() && getCFIType() != Other.getCFIType()) return false; + if (getDeactivationSymbol() != Other.getDeactivationSymbol()) + return false; return true; } @@ -2037,6 +2054,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST, OS << ','; OS << " cfi-type " << CFIType; } + if (getDeactivationSymbol()) + OS << ", deactivation-symbol " << getDeactivationSymbol()->getName(); if (DebugInstrNum) { if (!FirstOp) diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 52e8449fe510c..4ad721bf21959 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -15,10 +15,12 @@ #include "InstrEmitter.h" #include "SDNodeDbgValue.h" #include "llvm/BinaryFormat/Dwarf.h" +#include "llvm/CodeGen/ISDOpcodes.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/StackMaps.h" #include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/CodeGen/TargetLowering.h" @@ -61,6 +63,8 @@ static unsigned countOperands(SDNode *Node, unsigned NumExpUses, unsigned N = Node->getNumOperands(); while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) --N; + if (N && Node->getOperand(N - 1).getOpcode() == ISD::DEACTIVATION_SYMBOL) + --N; // Ignore deactivation symbol if it exists. if (N && Node->getOperand(N - 1).getValueType() == MVT::Other) --N; // Ignore chain if it exists. @@ -1222,15 +1226,23 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned, } } - if (SDNode *GluedNode = Node->getGluedNode()) { - // FIXME: Possibly iterate over multiple glue nodes? 
- if (GluedNode->getOpcode() == - ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { - Register VReg = getVR(GluedNode->getOperand(0), VRBaseMap); - MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false, - /*isImp=*/true); - MIB->addOperand(MO); - } + unsigned Op = Node->getNumOperands(); + if (Op != 0 && Node->getOperand(Op - 1)->getOpcode() == + ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { + Register VReg = getVR(Node->getOperand(Op - 1)->getOperand(0), VRBaseMap); + MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false, + /*isImp=*/true); + MIB->addOperand(MO); + Op--; + } + + if (Op != 0 && + Node->getOperand(Op - 1)->getOpcode() == ISD::DEACTIVATION_SYMBOL) { + MI->setDeactivationSymbol( + *MF, const_cast( + cast(Node->getOperand(Op - 1)) + ->getGlobal())); + Op--; } // Run post-isel target hook to adjust this instruction if needed. @@ -1251,7 +1263,8 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, llvm_unreachable("This target-independent node should have been selected!"); case ISD::EntryToken: case ISD::MERGE_VALUES: - case ISD::TokenFactor: // fall thru + case ISD::TokenFactor: + case ISD::DEACTIVATION_SYMBOL: break; case ISD::CopyToReg: { Register DestReg = cast(Node->getOperand(1))->getReg(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 1b15a207a2d37..06735708d5369 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1916,6 +1916,21 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, return SDValue(N, 0); } +SDValue SelectionDAG::getDeactivationSymbol(const GlobalValue *GV) { + SDVTList VTs = getVTList(MVT::Untyped); + FoldingSetNodeID ID; + AddNodeIDNode(ID, ISD::DEACTIVATION_SYMBOL, VTs, {}); + ID.AddPointer(GV); + void *IP = nullptr; + if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) + return SDValue(E, 0); + + auto *N = newSDNode(GV, VTs); + 
CSEMap.InsertNode(N, IP); + InsertNode(N); + return SDValue(N, 0); +} + SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; SDVTList VTs = getVTList(VT); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 88b35582a9f7d..53d73ad618bd1 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -45,6 +45,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SelectionDAGTargetInfo.h" #include "llvm/CodeGen/StackMaps.h" #include "llvm/CodeGen/SwiftErrorValueTracking.h" @@ -5376,6 +5377,14 @@ SmallVector SelectionDAGBuilder::getTargetIntrinsicOperands( } } + if (std::optional Bundle = + I.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + auto *Sym = Bundle->Inputs[0].get(); + SDValue SDSym = getValue(Sym); + SDSym = DAG.getDeactivationSymbol(cast(Sym)); + Ops.push_back(SDSym); + } + if (std::optional Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) { Value *Token = Bundle->Inputs[0].get(); @@ -9116,6 +9125,11 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, ConvControlToken = getValue(Token); } + GlobalValue *DeactivationSymbol = nullptr; + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + DeactivationSymbol = cast(Bundle->Inputs[0].get()); + } + TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(getCurSDLoc()) .setChain(getRoot()) @@ -9125,7 +9139,8 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, .setIsPreallocated( CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0) .setCFIType(CFIType) - .setConvergenceControlToken(ConvControlToken); + 
.setConvergenceControlToken(ConvControlToken) + .setDeactivationSymbol(DeactivationSymbol); // Set the pointer authentication info if we have it. if (PAI) { @@ -9745,7 +9760,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { {LLVMContext::OB_deopt, LLVMContext::OB_funclet, LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated, LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}); + LLVMContext::OB_convergencectrl, LLVMContext::OB_deactivation_symbol}); SDValue Callee = getValue(I.getCalledOperand()); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 0fad4722b1871..dd8f18d3b8a6a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -3308,6 +3308,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, case ISD::LIFETIME_START: case ISD::LIFETIME_END: case ISD::PSEUDO_PROBE: + case ISD::DEACTIVATION_SYMBOL: NodeToMatch->setNodeId(-1); // Mark selected. return; case ISD::AssertSext: @@ -3389,7 +3390,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, // These are the current input chain and glue for use when generating nodes. // Various Emit operations change these. For example, emitting a copytoreg // uses and updates these. - SDValue InputChain, InputGlue; + SDValue InputChain, InputGlue, DeactivationSymbol; // ChainNodesMatched - If a pattern matches nodes that have input/output // chains, the OPC_EmitMergeInputChains operation is emitted which indicates @@ -3542,6 +3543,15 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, InputGlue = N->getOperand(N->getNumOperands()-1); continue; + case OPC_CaptureDeactivationSymbol: + // If the current node has a deactivation symbol, capture it in + // DeactivationSymbol. 
+ if (N->getNumOperands() != 0 && + N->getOperand(N->getNumOperands() - 1).getOpcode() == + ISD::DEACTIVATION_SYMBOL) + DeactivationSymbol = N->getOperand(N->getNumOperands() - 1); + continue; + case OPC_MoveChild: { unsigned ChildNo = MatcherTable[MatcherIndex++]; if (ChildNo >= N.getNumOperands()) @@ -4223,6 +4233,8 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, // If this has chain/glue inputs, add them. if (EmitNodeInfo & OPFL_Chain) Ops.push_back(InputChain); + if (DeactivationSymbol.getNode() != nullptr) + Ops.push_back(DeactivationSymbol); if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr) Ops.push_back(InputGlue); diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 5684e0e4c26c4..521d8f07434e6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -10607,23 +10607,26 @@ TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() && "Incompatible types of Data and Mask"); if (IsCompressedMemory) { - if (DataVT.isScalableVector()) - report_fatal_error( - "Cannot currently handle compressed memory with scalable vectors"); // Incrementing the pointer according to number of '1's in the mask. 
- EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); - SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); - if (MaskIntVT.getSizeInBits() < 32) { - MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); - MaskIntVT = MVT::i32; + if (DataVT.isScalableVector()) { + EVT MaskExtVT = MaskVT.changeElementType(MVT::i32); + SDValue MaskExt = DAG.getNode(ISD::ZERO_EXTEND, DL, MaskExtVT, Mask); + Increment = DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, MaskExt); + } else { + EVT MaskIntVT = + EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); + SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); + if (MaskIntVT.getSizeInBits() < 32) { + MaskInIntReg = + DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); + MaskIntVT = MVT::i32; + } + Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); } - - // Count '1's with POPCNT. - Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); - Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); // Scale is an element size in bytes. SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, AddrVT); + Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); } else if (DataVT.isScalableVector()) { Increment = DAG.getVScale(DL, AddrVT, diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index d503d7a2345fd..fef3a3663d3a8 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -66,10 +66,6 @@ const TargetRegisterClass *TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, const MCOperandInfo &OpInfo = MCID.operands()[OpNum]; int16_t RegClass = getOpRegClassID(OpInfo); - // TODO: Remove isLookupPtrRegClass in favor of isLookupRegClassByHwMode - if (OpInfo.isLookupPtrRegClass()) - return TRI.getPointerRegClass(RegClass); - // Instructions like INSERT_SUBREG do not have fixed register classes. 
if (RegClass < 0) return nullptr; diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 5101717526263..cf88c4309974f 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -14,6 +14,7 @@ #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/ADT/SmallBitVector.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/AssumptionCache.h" @@ -136,6 +137,8 @@ static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) { case OMPScheduleType::NomergeOrderedRuntime: case OMPScheduleType::NomergeOrderedAuto: case OMPScheduleType::NomergeOrderedTrapezoidal: + case OMPScheduleType::OrderedDistributeChunked: + case OMPScheduleType::OrderedDistribute: break; default: return false; @@ -182,7 +185,7 @@ static const omp::GV &getGridValue(const Triple &T, Function *Kernel) { /// arguments. static OMPScheduleType getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks, - bool HasSimdModifier) { + bool HasSimdModifier, bool HasDistScheduleChunks) { // Currently, the default schedule it static. switch (ClauseKind) { case OMP_SCHEDULE_Default: @@ -199,6 +202,9 @@ getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks, case OMP_SCHEDULE_Runtime: return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd : OMPScheduleType::BaseRuntime; + case OMP_SCHEDULE_Distribute: + return HasDistScheduleChunks ? 
OMPScheduleType::BaseDistributeChunked + : OMPScheduleType::BaseDistribute; } llvm_unreachable("unhandled schedule clause argument"); } @@ -267,9 +273,10 @@ getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType, static OMPScheduleType computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks, bool HasSimdModifier, bool HasMonotonicModifier, - bool HasNonmonotonicModifier, bool HasOrderedClause) { - OMPScheduleType BaseSchedule = - getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier); + bool HasNonmonotonicModifier, bool HasOrderedClause, + bool HasDistScheduleChunks) { + OMPScheduleType BaseSchedule = getOpenMPBaseScheduleType( + ClauseKind, HasChunks, HasSimdModifier, HasDistScheduleChunks); OMPScheduleType OrderedSchedule = getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause); OMPScheduleType Result = getOpenMPMonotonicityScheduleType( @@ -2465,7 +2472,8 @@ Value *OpenMPIRBuilder::createRuntimeShuffleFunction(InsertPointTy AllocaIP, void OpenMPIRBuilder::shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr, Type *ElemType, - Value *Offset, Type *ReductionArrayTy) { + Value *Offset, Type *ReductionArrayTy, + bool IsByRefElem) { uint64_t Size = M.getDataLayout().getTypeStoreSize(ElemType); // Create the loop over the big sized data. 
// ptr = (void*)Elem; @@ -2547,10 +2555,10 @@ void OpenMPIRBuilder::shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, } } -void OpenMPIRBuilder::emitReductionListCopy( +Error OpenMPIRBuilder::emitReductionListCopy( InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy, ArrayRef ReductionInfos, Value *SrcBase, Value *DestBase, - CopyOptionsTy CopyOptions) { + ArrayRef IsByRef, CopyOptionsTy CopyOptions) { Type *IndexTy = Builder.getIndexTy( M.getDataLayout(), M.getDataLayout().getDefaultGlobalsAddressSpace()); Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset; @@ -2560,6 +2568,7 @@ void OpenMPIRBuilder::emitReductionListCopy( for (auto En : enumerate(ReductionInfos)) { const ReductionInfo &RI = En.value(); Value *SrcElementAddr = nullptr; + AllocaInst *DestAlloca = nullptr; Value *DestElementAddr = nullptr; Value *DestElementPtrAddr = nullptr; // Should we shuffle in an element from a remote lane? @@ -2579,14 +2588,18 @@ void OpenMPIRBuilder::emitReductionListCopy( DestElementPtrAddr = Builder.CreateInBoundsGEP( ReductionArrayTy, DestBase, {ConstantInt::get(IndexTy, 0), ConstantInt::get(IndexTy, En.index())}); + bool IsByRefElem = (!IsByRef.empty() && IsByRef[En.index()]); switch (Action) { case CopyAction::RemoteLaneToThread: { InsertPointTy CurIP = Builder.saveIP(); Builder.restoreIP(AllocaIP); - AllocaInst *DestAlloca = Builder.CreateAlloca(RI.ElementType, nullptr, - ".omp.reduction.element"); + + Type *DestAllocaType = + IsByRefElem ? 
RI.ByRefAllocatedType : RI.ElementType; + DestAlloca = Builder.CreateAlloca(DestAllocaType, nullptr, + ".omp.reduction.element"); DestAlloca->setAlignment( - M.getDataLayout().getPrefTypeAlign(RI.ElementType)); + M.getDataLayout().getPrefTypeAlign(DestAllocaType)); DestElementAddr = DestAlloca; DestElementAddr = Builder.CreateAddrSpaceCast(DestElementAddr, Builder.getPtrTy(), @@ -2606,8 +2619,57 @@ void OpenMPIRBuilder::emitReductionListCopy( // Now that all active lanes have read the element in the // Reduce list, shuffle over the value from the remote lane. if (ShuffleInElement) { - shuffleAndStore(AllocaIP, SrcElementAddr, DestElementAddr, RI.ElementType, - RemoteLaneOffset, ReductionArrayTy); + Type *ShuffleType = RI.ElementType; + Value *ShuffleSrcAddr = SrcElementAddr; + Value *ShuffleDestAddr = DestElementAddr; + AllocaInst *LocalStorage = nullptr; + + if (IsByRefElem) { + assert(RI.ByRefElementType && "Expected by-ref element type to be set"); + assert(RI.ByRefAllocatedType && + "Expected by-ref allocated type to be set"); + // For by-ref reductions, we need to copy from the remote lane the + // actual value of the partial reduction computed by that remote lane; + // rather than, for example, a pointer to that data or, even worse, a + // pointer to the descriptor of the by-ref reduction element. 
+ ShuffleType = RI.ByRefElementType; + + InsertPointOrErrorTy GenResult = + RI.DataPtrPtrGen(Builder.saveIP(), ShuffleSrcAddr, ShuffleSrcAddr); + + if (!GenResult) + return GenResult.takeError(); + + ShuffleSrcAddr = Builder.CreateLoad(Builder.getPtrTy(), ShuffleSrcAddr); + + { + InsertPointTy OldIP = Builder.saveIP(); + Builder.restoreIP(AllocaIP); + + LocalStorage = Builder.CreateAlloca(ShuffleType); + Builder.restoreIP(OldIP); + ShuffleDestAddr = LocalStorage; + } + } + + shuffleAndStore(AllocaIP, ShuffleSrcAddr, ShuffleDestAddr, ShuffleType, + RemoteLaneOffset, ReductionArrayTy, IsByRefElem); + + if (IsByRefElem) { + Value *GEP; + InsertPointOrErrorTy GenResult = + RI.DataPtrPtrGen(Builder.saveIP(), + Builder.CreatePointerBitCastOrAddrSpaceCast( + DestAlloca, Builder.getPtrTy(), ".ascast"), + GEP); + + if (!GenResult) + return GenResult.takeError(); + + Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast( + LocalStorage, Builder.getPtrTy(), ".ascast"), + GEP); + } } else { switch (RI.EvaluationKind) { case EvalKind::Scalar: { @@ -2658,11 +2720,13 @@ void OpenMPIRBuilder::emitReductionListCopy( Builder.CreateStore(CastDestAddr, DestElementPtrAddr); } } + + return Error::success(); } Expected OpenMPIRBuilder::emitInterWarpCopyFunction( const LocationDescription &Loc, ArrayRef ReductionInfos, - AttributeList FuncAttrs) { + AttributeList FuncAttrs, ArrayRef IsByRef) { InsertPointTy SavedIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( @@ -2743,7 +2807,9 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( // memory. // const ReductionInfo &RI = En.value(); - unsigned RealTySize = M.getDataLayout().getTypeAllocSize(RI.ElementType); + bool IsByRefElem = !IsByRef.empty() && IsByRef[En.index()]; + unsigned RealTySize = M.getDataLayout().getTypeAllocSize( + IsByRefElem ? 
RI.ByRefElementType : RI.ElementType); for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) { Type *CType = Builder.getIntNTy(TySize * 8); @@ -2806,6 +2872,17 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( ConstantInt::get(IndexTy, En.index())}); // elemptr = ((CopyType*)(elemptrptr)) + I Value *ElemPtr = Builder.CreateLoad(Builder.getPtrTy(), ElemPtrPtr); + + if (IsByRefElem) { + InsertPointOrErrorTy GenRes = + RI.DataPtrPtrGen(Builder.saveIP(), ElemPtr, ElemPtr); + + if (!GenRes) + return GenRes.takeError(); + + ElemPtr = Builder.CreateLoad(Builder.getPtrTy(), ElemPtr); + } + if (NumIters > 1) ElemPtr = Builder.CreateGEP(Builder.getInt32Ty(), ElemPtr, Cnt); @@ -2861,6 +2938,17 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( Value *TargetElemPtrVal = Builder.CreateLoad(Builder.getPtrTy(), TargetElemPtrPtr); Value *TargetElemPtr = TargetElemPtrVal; + + if (IsByRefElem) { + InsertPointOrErrorTy GenRes = + RI.DataPtrPtrGen(Builder.saveIP(), TargetElemPtr, TargetElemPtr); + + if (!GenRes) + return GenRes.takeError(); + + TargetElemPtr = Builder.CreateLoad(Builder.getPtrTy(), TargetElemPtr); + } + if (NumIters > 1) TargetElemPtr = Builder.CreateGEP(Builder.getInt32Ty(), TargetElemPtr, Cnt); @@ -2895,9 +2983,9 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( return WcFunc; } -Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( +Expected OpenMPIRBuilder::emitShuffleAndReduceFunction( ArrayRef ReductionInfos, Function *ReduceFn, - AttributeList FuncAttrs) { + AttributeList FuncAttrs, ArrayRef IsByRef) { LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get(Builder.getVoidTy(), @@ -2976,9 +3064,13 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( // This loop iterates through the list of reduce elements and copies, // element by element, from a remote lane in the warp to RemoteReduceList, // hosted on the thread's stack. 
- emitReductionListCopy( + Error EmitRedLsCpRes = emitReductionListCopy( AllocaIP, CopyAction::RemoteLaneToThread, RedListArrayTy, ReductionInfos, - ReduceList, RemoteListAddrCast, {RemoteLaneOffset, nullptr, nullptr}); + ReduceList, RemoteListAddrCast, IsByRef, + {RemoteLaneOffset, nullptr, nullptr}); + + if (EmitRedLsCpRes) + return EmitRedLsCpRes; // The actions to be performed on the Remote Reduce list is dependent // on the algorithm version. @@ -3046,8 +3138,14 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( Builder.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB); emitBlock(CpyThenBB, Builder.GetInsertBlock()->getParent()); - emitReductionListCopy(AllocaIP, CopyAction::ThreadCopy, RedListArrayTy, - ReductionInfos, RemoteListAddrCast, ReduceList); + + EmitRedLsCpRes = emitReductionListCopy( + AllocaIP, CopyAction::ThreadCopy, RedListArrayTy, ReductionInfos, + RemoteListAddrCast, ReduceList, IsByRef); + + if (EmitRedLsCpRes) + return EmitRedLsCpRes; + Builder.CreateBr(CpyMergeBB); emitBlock(CpyElseBB, Builder.GetInsertBlock()->getParent()); @@ -3452,7 +3550,8 @@ std::string OpenMPIRBuilder::getReductionFuncName(StringRef Name) const { Expected OpenMPIRBuilder::createReductionFunction( StringRef ReducerName, ArrayRef ReductionInfos, - ReductionGenCBKind ReductionGenCBKind, AttributeList FuncAttrs) { + ArrayRef IsByRef, ReductionGenCBKind ReductionGenCBKind, + AttributeList FuncAttrs) { auto *FuncTy = FunctionType::get(Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getPtrTy()}, /* IsVarArg */ false); @@ -3513,8 +3612,14 @@ Expected OpenMPIRBuilder::createReductionFunction( LHSPtrs.emplace_back(LHSPtr); RHSPtrs.emplace_back(RHSPtr); } else { - Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); - Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr); + Value *LHS = LHSPtr; + Value *RHS = RHSPtr; + + if (!IsByRef.empty() && !IsByRef[En.index()]) { + LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); + RHS = Builder.CreateLoad(RI.ElementType, 
RHSPtr); + } + Value *Reduced; InsertPointOrErrorTy AfterIP = RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced); @@ -3524,7 +3629,9 @@ Expected OpenMPIRBuilder::createReductionFunction( return ReductionFunc; Builder.restoreIP(*AfterIP); - Builder.CreateStore(Reduced, LHSPtr); + + if (!IsByRef.empty() && !IsByRef[En.index()]) + Builder.CreateStore(Reduced, LHSPtr); } } @@ -3577,9 +3684,9 @@ checkReductionInfos(ArrayRef ReductionInfos, OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef ReductionInfos, - bool IsNoWait, bool IsTeamsReduction, ReductionGenCBKind ReductionGenCBKind, - std::optional GridValue, unsigned ReductionBufNum, - Value *SrcLocInfo) { + ArrayRef IsByRef, bool IsNoWait, bool IsTeamsReduction, + ReductionGenCBKind ReductionGenCBKind, std::optional GridValue, + unsigned ReductionBufNum, Value *SrcLocInfo) { if (!updateToLocation(Loc)) return InsertPointTy(); Builder.restoreIP(CodeGenIP); @@ -3615,9 +3722,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( FuncAttrs = FuncAttrs.addFnAttributes(Ctx, AttrBldr); CodeGenIP = Builder.saveIP(); - Expected ReductionResult = - createReductionFunction(Builder.GetInsertBlock()->getParent()->getName(), - ReductionInfos, ReductionGenCBKind, FuncAttrs); + Expected ReductionResult = createReductionFunction( + Builder.GetInsertBlock()->getParent()->getName(), ReductionInfos, IsByRef, + ReductionGenCBKind, FuncAttrs); if (!ReductionResult) return ReductionResult.takeError(); Function *ReductionFunc = *ReductionResult; @@ -3656,15 +3763,25 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Value *ElemPtr = Builder.CreateInBoundsGEP( RedArrayTy, ReductionList, {ConstantInt::get(IndexTy, 0), ConstantInt::get(IndexTy, En.index())}); + + Value *PrivateVar = RI.PrivateVariable; + bool IsByRefElem = !IsByRef.empty() && IsByRef[En.index()]; + if 
(IsByRefElem) + PrivateVar = Builder.CreateLoad(RI.ElementType, PrivateVar); + Value *CastElem = - Builder.CreatePointerBitCastOrAddrSpaceCast(RI.PrivateVariable, PtrTy); + Builder.CreatePointerBitCastOrAddrSpaceCast(PrivateVar, PtrTy); Builder.CreateStore(CastElem, ElemPtr); } CodeGenIP = Builder.saveIP(); - Function *SarFunc = - emitShuffleAndReduceFunction(ReductionInfos, ReductionFunc, FuncAttrs); + Expected SarFunc = emitShuffleAndReduceFunction( + ReductionInfos, ReductionFunc, FuncAttrs, IsByRef); + + if (!SarFunc) + return SarFunc.takeError(); + Expected CopyResult = - emitInterWarpCopyFunction(Loc, ReductionInfos, FuncAttrs); + emitInterWarpCopyFunction(Loc, ReductionInfos, FuncAttrs, IsByRef); if (!CopyResult) return CopyResult.takeError(); Function *WcFunc = *CopyResult; @@ -3684,7 +3801,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Builder.getInt64(MaxDataSize * ReductionInfos.size()); if (!IsTeamsReduction) { Value *SarFuncCast = - Builder.CreatePointerBitCastOrAddrSpaceCast(SarFunc, FuncPtrTy); + Builder.CreatePointerBitCastOrAddrSpaceCast(*SarFunc, FuncPtrTy); Value *WcFuncCast = Builder.CreatePointerBitCastOrAddrSpaceCast(WcFunc, FuncPtrTy); Value *Args[] = {SrcLocInfo, ReductionDataSize, RL, SarFuncCast, @@ -3716,7 +3833,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Builder.getInt32(ReductionBufNum), ReductionDataSize, RL, - SarFunc, + *SarFunc, WcFunc, LtGCFunc, LtGRFunc, @@ -3743,7 +3860,8 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( // Add emission of __kmpc_end_reduce{_nowait}(); for (auto En : enumerate(ReductionInfos)) { const ReductionInfo &RI = En.value(); - Value *LHS = RI.Variable; + Type *ValueType = RI.ElementType; + Value *RedValue = RI.Variable; Value *RHS = Builder.CreatePointerBitCastOrAddrSpaceCast(RI.PrivateVariable, PtrTy); @@ -3754,7 +3872,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( // Fix 
the CallBack code genereated to use the correct Values for the LHS // and RHS - LHSPtr->replaceUsesWithIf(LHS, [ReductionFunc](const Use &U) { + LHSPtr->replaceUsesWithIf(RedValue, [ReductionFunc](const Use &U) { return cast(U.getUser())->getParent()->getParent() == ReductionFunc; }); @@ -3763,15 +3881,21 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( ReductionFunc; }); } else { - Value *LHSValue = Builder.CreateLoad(RI.ElementType, LHS, "final.lhs"); - Value *RHSValue = Builder.CreateLoad(RI.ElementType, RHS, "final.rhs"); + if (IsByRef.empty() || !IsByRef[En.index()]) { + RedValue = Builder.CreateLoad(ValueType, RI.Variable, + "red.value." + Twine(En.index())); + } + Value *PrivateRedValue = Builder.CreateLoad( + ValueType, RHS, "red.private.value" + Twine(En.index())); Value *Reduced; InsertPointOrErrorTy AfterIP = - RI.ReductionGen(Builder.saveIP(), RHSValue, LHSValue, Reduced); + RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced); if (!AfterIP) return AfterIP.takeError(); Builder.restoreIP(*AfterIP); - Builder.CreateStore(Reduced, LHS, false); + + if (!IsByRef.empty() && !IsByRef[En.index()]) + Builder.CreateStore(Reduced, RI.Variable); } } emitBlock(ExitBB, CurFunc); @@ -3872,7 +3996,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductions( assert(ReductionInfos.size() == IsByRef.size()); if (Config.isGPU()) return createReductionsGPU(Loc, AllocaIP, Builder.saveIP(), ReductionInfos, - IsNoWait, IsTeamsReduction); + IsByRef, IsNoWait, IsTeamsReduction); checkReductionInfos(ReductionInfos, /*IsGPU*/ false); @@ -4689,7 +4813,8 @@ static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyStaticWorkshareLoop( DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, - WorksharingLoopType LoopType, bool NeedsBarrier) { + WorksharingLoopType LoopType, bool NeedsBarrier, bool HasDistSchedule, + OMPScheduleType 
DistScheduleSchedType) { assert(CLI->isValid() && "Requires a valid canonical loop"); assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) && "Require dedicated allocate IP"); @@ -4745,15 +4870,29 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyStaticWorkshareLoop( // Call the "init" function and update the trip count of the loop with the // value it produced. - SmallVector Args( - {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, PUpperBound}); - if (LoopType == WorksharingLoopType::DistributeForStaticLoop) { - Value *PDistUpperBound = - Builder.CreateAlloca(IVTy, nullptr, "p.distupperbound"); - Args.push_back(PDistUpperBound); + auto BuildInitCall = [LoopType, SrcLoc, ThreadNum, PLastIter, PLowerBound, + PUpperBound, IVTy, PStride, One, Zero, StaticInit, + this](Value *SchedulingType, auto &Builder) { + SmallVector Args({SrcLoc, ThreadNum, SchedulingType, PLastIter, + PLowerBound, PUpperBound}); + if (LoopType == WorksharingLoopType::DistributeForStaticLoop) { + Value *PDistUpperBound = + Builder.CreateAlloca(IVTy, nullptr, "p.distupperbound"); + Args.push_back(PDistUpperBound); + } + Args.append({PStride, One, Zero}); + createRuntimeFunctionCall(StaticInit, Args); + }; + BuildInitCall(SchedulingType, Builder); + if (HasDistSchedule && + LoopType != WorksharingLoopType::DistributeStaticLoop) { + Constant *DistScheduleSchedType = ConstantInt::get( + I32Type, static_cast(omp::OMPScheduleType::OrderedDistribute)); + // We want to emit a second init function call for the dist_schedule clause + // to the Distribute construct. 
This should only be done however if a + // Workshare Loop is nested within a Distribute Construct + BuildInitCall(DistScheduleSchedType, Builder); } - Args.append({PStride, One, Zero}); - createRuntimeFunctionCall(StaticInit, Args); Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); @@ -4792,14 +4931,44 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyStaticWorkshareLoop( return AfterIP; } +static void addAccessGroupMetadata(BasicBlock *Block, MDNode *AccessGroup, + LoopInfo &LI); +static void addLoopMetadata(CanonicalLoopInfo *Loop, + ArrayRef Properties); + +static void applyParallelAccessesMetadata(CanonicalLoopInfo *CLI, + LLVMContext &Ctx, Loop *Loop, + LoopInfo &LoopInfo, + SmallVector &LoopMDList) { + SmallSet Reachable; + + // Get the basic blocks from the loop in which memref instructions + // can be found. + // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo, + // preferably without running any passes. + for (BasicBlock *Block : Loop->getBlocks()) { + if (Block == CLI->getCond() || Block == CLI->getHeader()) + continue; + Reachable.insert(Block); + } + + // Add access group metadata to memory-access instructions. + MDNode *AccessGroup = MDNode::getDistinct(Ctx, {}); + for (BasicBlock *BB : Reachable) + addAccessGroupMetadata(BB, AccessGroup, LoopInfo); + // TODO: If the loop has existing parallel access metadata, have + // to combine two lists. 
+ LoopMDList.push_back(MDNode::get( + Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccessGroup})); +} + OpenMPIRBuilder::InsertPointOrErrorTy -OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(DebugLoc DL, - CanonicalLoopInfo *CLI, - InsertPointTy AllocaIP, - bool NeedsBarrier, - Value *ChunkSize) { +OpenMPIRBuilder::applyStaticChunkedWorkshareLoop( + DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, + bool NeedsBarrier, Value *ChunkSize, OMPScheduleType SchedType, + Value *DistScheduleChunkSize, OMPScheduleType DistScheduleSchedType) { assert(CLI->isValid() && "Requires a valid canonical loop"); - assert(ChunkSize && "Chunk size is required"); + assert(ChunkSize || DistScheduleChunkSize && "Chunk size is required"); LLVMContext &Ctx = CLI->getFunction()->getContext(); Value *IV = CLI->getIndVar(); @@ -4813,6 +4982,18 @@ OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(DebugLoc DL, Constant *Zero = ConstantInt::get(InternalIVTy, 0); Constant *One = ConstantInt::get(InternalIVTy, 1); + Function *F = CLI->getFunction(); + FunctionAnalysisManager FAM; + FAM.registerPass([]() { return DominatorTreeAnalysis(); }); + FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); + LoopAnalysis LIA; + LoopInfo &&LI = LIA.run(*F, FAM); + Loop *L = LI.getLoopFor(CLI->getHeader()); + SmallVector LoopMDList; + if (ChunkSize || DistScheduleChunkSize) + applyParallelAccessesMetadata(CLI, Ctx, L, LI, LoopMDList); + addLoopMetadata(CLI, LoopMDList); + // Declare useful OpenMP runtime functions. FunctionCallee StaticInit = getKmpcForStaticInitForType(InternalIVTy, M, *this); @@ -4835,13 +5016,18 @@ OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(DebugLoc DL, Builder.SetCurrentDebugLocation(DL); // TODO: Detect overflow in ubsan or max-out with current tripcount. - Value *CastedChunkSize = - Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize"); + Value *CastedChunkSize = Builder.CreateZExtOrTrunc( + ChunkSize ? 
ChunkSize : Zero, InternalIVTy, "chunksize"); + Value *CastedDistScheduleChunkSize = Builder.CreateZExtOrTrunc( + DistScheduleChunkSize ? DistScheduleChunkSize : Zero, InternalIVTy, + "distschedulechunksize"); Value *CastedTripCount = Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount"); - Constant *SchedulingType = ConstantInt::get( - I32Type, static_cast(OMPScheduleType::UnorderedStaticChunked)); + Constant *SchedulingType = + ConstantInt::get(I32Type, static_cast(SchedType)); + Constant *DistSchedulingType = + ConstantInt::get(I32Type, static_cast(DistScheduleSchedType)); Builder.CreateStore(Zero, PLowerBound); Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One); Builder.CreateStore(OrigUpperBound, PUpperBound); @@ -4853,12 +5039,26 @@ OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(DebugLoc DL, Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); Value *ThreadNum = getOrCreateThreadID(SrcLoc); - createRuntimeFunctionCall( - StaticInit, {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum, - /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter, - /*plower=*/PLowerBound, /*pupper=*/PUpperBound, - /*pstride=*/PStride, /*incr=*/One, - /*chunk=*/CastedChunkSize}); + auto BuildInitCall = [StaticInit, SrcLoc, ThreadNum, PLastIter, PLowerBound, + PUpperBound, PStride, One, + this](Value *SchedulingType, Value *ChunkSize, + auto &Builder) { + createRuntimeFunctionCall( + StaticInit, {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum, + /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter, + /*plower=*/PLowerBound, /*pupper=*/PUpperBound, + /*pstride=*/PStride, /*incr=*/One, + /*chunk=*/ChunkSize}); + }; + BuildInitCall(SchedulingType, CastedChunkSize, Builder); + if (DistScheduleSchedType != OMPScheduleType::None && + SchedType != OMPScheduleType::OrderedDistributeChunked && + SchedType != OMPScheduleType::OrderedDistribute) { + // We want to emit a second init function call for the dist_schedule 
clause + // to the Distribute construct. This should only be done however if a + // Workshare Loop is nested within a Distribute Construct + BuildInitCall(DistSchedulingType, CastedDistScheduleChunkSize, Builder); + } // Load values written by the "init" function. Value *FirstChunkStart = @@ -5185,31 +5385,47 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyWorkshareLoop( bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier, bool HasNonmonotonicModifier, bool HasOrderedClause, - WorksharingLoopType LoopType, bool NoLoop) { + WorksharingLoopType LoopType, bool NoLoop, bool HasDistSchedule, + Value *DistScheduleChunkSize) { if (Config.isTargetDevice()) return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType, NoLoop); OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType( SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier, - HasNonmonotonicModifier, HasOrderedClause); + HasNonmonotonicModifier, HasOrderedClause, DistScheduleChunkSize); bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) == OMPScheduleType::ModifierOrdered; + OMPScheduleType DistScheduleSchedType = OMPScheduleType::None; + if (HasDistSchedule) { + DistScheduleSchedType = DistScheduleChunkSize + ? OMPScheduleType::OrderedDistributeChunked + : OMPScheduleType::OrderedDistribute; + } switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) { case OMPScheduleType::BaseStatic: - assert(!ChunkSize && "No chunk size with static-chunked schedule"); - if (IsOrdered) + case OMPScheduleType::BaseDistribute: + assert(!ChunkSize || !DistScheduleChunkSize && + "No chunk size with static-chunked schedule"); + if (IsOrdered && !HasDistSchedule) return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, NeedsBarrier, ChunkSize); // FIXME: Monotonicity ignored? 
- return applyStaticWorkshareLoop(DL, CLI, AllocaIP, LoopType, NeedsBarrier); + if (DistScheduleChunkSize) + return applyStaticChunkedWorkshareLoop( + DL, CLI, AllocaIP, NeedsBarrier, ChunkSize, EffectiveScheduleType, + DistScheduleChunkSize, DistScheduleSchedType); + return applyStaticWorkshareLoop(DL, CLI, AllocaIP, LoopType, NeedsBarrier, + HasDistSchedule); case OMPScheduleType::BaseStaticChunked: - if (IsOrdered) + case OMPScheduleType::BaseDistributeChunked: + if (IsOrdered && !HasDistSchedule) return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, NeedsBarrier, ChunkSize); // FIXME: Monotonicity ignored? - return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier, - ChunkSize); + return applyStaticChunkedWorkshareLoop( + DL, CLI, AllocaIP, NeedsBarrier, ChunkSize, EffectiveScheduleType, + DistScheduleChunkSize, DistScheduleSchedType); case OMPScheduleType::BaseRuntime: case OMPScheduleType::BaseAuto: @@ -5803,8 +6019,8 @@ static void addLoopMetadata(CanonicalLoopInfo *Loop, } /// Attach llvm.access.group metadata to the memref instructions of \p Block -static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup, - LoopInfo &LI) { +static void addAccessGroupMetadata(BasicBlock *Block, MDNode *AccessGroup, + LoopInfo &LI) { for (Instruction &I : *Block) { if (I.mayReadOrWriteMemory()) { // TODO: This instruction may already have access group from @@ -5994,16 +6210,8 @@ void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop, // dependences of 'safelen' iterations are possible. // If clause order(concurrent) is specified then the memory instructions // are marked parallel even if 'safelen' is finite. - if ((Safelen == nullptr) || (Order == OrderKind::OMP_ORDER_concurrent)) { - // Add access group metadata to memory-access instructions. 
- MDNode *AccessGroup = MDNode::getDistinct(Ctx, {}); - for (BasicBlock *BB : Reachable) - addSimdMetadata(BB, AccessGroup, LI); - // TODO: If the loop has existing parallel access metadata, have - // to combine two lists. - LoopMDList.push_back(MDNode::get( - Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccessGroup})); - } + if ((Safelen == nullptr) || (Order == OrderKind::OMP_ORDER_concurrent)) + applyParallelAccessesMetadata(CanonicalLoop, Ctx, L, LI, LoopMDList); // FIXME: the IF clause shares a loop backedge for the SIMD and non-SIMD // versions so we can't add the loop attributes in that case. diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index eebabfd772982..7932765db8359 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -1674,12 +1674,14 @@ static void writeConstantInternal(raw_ostream &Out, const Constant *CV, if (const auto *CPA = dyn_cast(CV)) { Out << "ptrauth ("; - // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?) + // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC[, ptr DS]?]?]?) unsigned NumOpsToWrite = 2; if (!CPA->getOperand(2)->isNullValue()) NumOpsToWrite = 3; if (!CPA->getOperand(3)->isNullValue()) NumOpsToWrite = 4; + if (!CPA->getOperand(4)->isNullValue()) + NumOpsToWrite = 5; ListSeparator LS; for (unsigned i = 0, e = NumOpsToWrite; i != e; ++i) { diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 58b7ddd0381e5..487db134b0df3 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -1015,6 +1015,14 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F, } return false; // No other 'aarch64.sve.tuple.*'. } + + if (Name.starts_with("rev.nxv")) { + // 'aarch64.sve.rev.' + NewFn = Intrinsic::getOrInsertDeclaration( + F->getParent(), Intrinsic::vector_reverse, F->getReturnType()); + return true; + } + return false; // No other 'aarch64.sve.*'. 
} } diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index a3aa5e9571657..6b82da140256f 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -2081,28 +2081,33 @@ Value *NoCFIValue::handleOperandChangeImpl(Value *From, Value *To) { // ConstantPtrAuth *ConstantPtrAuth::get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) { - Constant *ArgVec[] = {Ptr, Key, Disc, AddrDisc}; + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) { + Constant *ArgVec[] = {Ptr, Key, Disc, AddrDisc, DeactivationSymbol}; ConstantPtrAuthKeyType MapKey(ArgVec); LLVMContextImpl *pImpl = Ptr->getContext().pImpl; return pImpl->ConstantPtrAuths.getOrCreate(Ptr->getType(), MapKey); } ConstantPtrAuth *ConstantPtrAuth::getWithSameSchema(Constant *Pointer) const { - return get(Pointer, getKey(), getDiscriminator(), getAddrDiscriminator()); + return get(Pointer, getKey(), getDiscriminator(), getAddrDiscriminator(), + getDeactivationSymbol()); } ConstantPtrAuth::ConstantPtrAuth(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) : Constant(Ptr->getType(), Value::ConstantPtrAuthVal, AllocMarker) { assert(Ptr->getType()->isPointerTy()); assert(Key->getBitWidth() == 32); assert(Disc->getBitWidth() == 64); assert(AddrDisc->getType()->isPointerTy()); + assert(DeactivationSymbol->getType()->isPointerTy()); setOperand(0, Ptr); setOperand(1, Key); setOperand(2, Disc); setOperand(3, AddrDisc); + setOperand(4, DeactivationSymbol); } /// Remove the constant from the constant table. 
@@ -2150,6 +2155,11 @@ bool ConstantPtrAuth::hasSpecialAddressDiscriminator(uint64_t Value) const { bool ConstantPtrAuth::isKnownCompatibleWith(const Value *Key, const Value *Discriminator, const DataLayout &DL) const { + // This function may only be validly called to analyze a ptrauth operation + // with no deactivation symbol, so if we have one it isn't compatible. + if (!getDeactivationSymbol()->isNullValue()) + return false; + // If the keys are different, there's no chance for this to be compatible. if (getKey() != Key) return false; diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h index e3e8d895a63f4..2073e0d42d8e3 100644 --- a/llvm/lib/IR/ConstantsContext.h +++ b/llvm/lib/IR/ConstantsContext.h @@ -539,7 +539,8 @@ struct ConstantPtrAuthKeyType { ConstantPtrAuth *create(TypeClass *Ty) const { return new ConstantPtrAuth(Operands[0], cast(Operands[1]), - cast(Operands[2]), Operands[3]); + cast(Operands[2]), Operands[3], + Operands[4]); } }; diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index 604730e0d3004..26c4f4ec784cd 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -1699,7 +1699,9 @@ LLVMValueRef LLVMConstantPtrAuth(LLVMValueRef Ptr, LLVMValueRef Key, LLVMValueRef Disc, LLVMValueRef AddrDisc) { return wrap(ConstantPtrAuth::get( unwrap(Ptr), unwrap(Key), - unwrap(Disc), unwrap(AddrDisc))); + unwrap(Disc), unwrap(AddrDisc), + ConstantPointerNull::get( + cast(unwrap(AddrDisc)->getType())))); } /*-- Opcode mapping */ diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index cd39970f5111f..85d3690dd8306 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -620,7 +620,8 @@ bool CallBase::hasReadingOperandBundles() const { // ptrauth) forces a callsite to be at least readonly. 
return hasOperandBundlesOtherThan({LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}) && + LLVMContext::OB_convergencectrl, + LLVMContext::OB_deactivation_symbol}) && getIntrinsicID() != Intrinsic::assume; } @@ -628,7 +629,8 @@ bool CallBase::hasClobberingOperandBundles() const { return hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext::OB_funclet, LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}) && + LLVMContext::OB_convergencectrl, + LLVMContext::OB_deactivation_symbol}) && getIntrinsicID() != Intrinsic::assume; } diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp index 335c210c10e1a..10aba759185a7 100644 --- a/llvm/lib/IR/LLVMContext.cpp +++ b/llvm/lib/IR/LLVMContext.cpp @@ -55,6 +55,8 @@ static StringRef knownBundleName(unsigned BundleTagID) { return "convergencectrl"; case LLVMContext::OB_align: return "align"; + case LLVMContext::OB_deactivation_symbol: + return "deactivation-symbol"; default: llvm_unreachable("unknown bundle id"); } diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp index ca7605ae53453..8f79398b086eb 100644 --- a/llvm/lib/IR/LLVMContextImpl.cpp +++ b/llvm/lib/IR/LLVMContextImpl.cpp @@ -107,6 +107,7 @@ LLVMContextImpl::~LLVMContextImpl() { ArrayConstants.freeConstants(); StructConstants.freeConstants(); VectorConstants.freeConstants(); + ConstantPtrAuths.freeConstants(); InlineAsms.freeConstants(); CAZConstants.clear(); diff --git a/llvm/lib/IR/ReplaceConstant.cpp b/llvm/lib/IR/ReplaceConstant.cpp index b3586b45a23f2..b1864c3dc9eeb 100644 --- a/llvm/lib/IR/ReplaceConstant.cpp +++ b/llvm/lib/IR/ReplaceConstant.cpp @@ -22,9 +22,9 @@ static bool isExpandableUser(User *U) { return isa(U) || isa(U); } -static SmallVector expandUser(BasicBlock::iterator InsertPt, - Constant *C) { - SmallVector NewInsts; +static void expandUser(BasicBlock::iterator InsertPt, Constant *C, + SmallVector &NewInsts) { + NewInsts.clear(); if 
(auto *CE = dyn_cast(C)) { Instruction *ConstInst = CE->getAsInstruction(); ConstInst->insertBefore(*InsertPt->getParent(), InsertPt); @@ -46,7 +46,6 @@ static SmallVector expandUser(BasicBlock::iterator InsertPt, } else { llvm_unreachable("Not an expandable user"); } - return NewInsts; } bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, @@ -91,6 +90,11 @@ bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, // Replace those expandable operands with instructions bool Changed = false; + // We need to cache the instructions we've already expanded to avoid expanding + // the same constant multiple times in the same basic block, which is + // problematic when the same constant is used in a phi node multiple times. + DenseMap, SmallVector> + ConstantToInstructionMap; while (!InstructionWorklist.empty()) { Instruction *I = InstructionWorklist.pop_back_val(); DebugLoc Loc = I->getDebugLoc(); @@ -105,7 +109,14 @@ bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, if (auto *C = dyn_cast(U.get())) { if (ExpandableUsers.contains(C)) { Changed = true; - auto NewInsts = expandUser(BI, C); + SmallVector &NewInsts = + ConstantToInstructionMap[std::make_pair(C, BI->getParent())]; + // If the cached instruction is after the insertion point, we need to + // create a new one. We can't simply move the cached instruction + // because its operands (also expanded instructions) might not + // dominate the new position. 
+ if (NewInsts.empty() || BI->comesBefore(NewInsts.front())) + expandUser(BI, C, NewInsts); for (auto *NI : NewInsts) NI->setDebugLoc(Loc); InstructionWorklist.insert_range(NewInsts); diff --git a/llvm/lib/IR/RuntimeLibcalls.cpp b/llvm/lib/IR/RuntimeLibcalls.cpp index cbe7a7b9f77f4..a5f842a5fb520 100644 --- a/llvm/lib/IR/RuntimeLibcalls.cpp +++ b/llvm/lib/IR/RuntimeLibcalls.cpp @@ -130,13 +130,23 @@ bool RuntimeLibcallsInfo::darwinHasExp10(const Triple &TT) { } } +/// TODO: There is really no guarantee that sizeof(size_t) is equal to the index +/// size of the default address space. This matches TargetLibraryInfo and should +/// be kept in sync. +static IntegerType *getSizeTType(LLVMContext &Ctx, const DataLayout &DL) { + return DL.getIndexType(Ctx, /*AddressSpace=*/0); +} + std::pair RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, const DataLayout &DL, RTLIB::LibcallImpl LibcallImpl) const { + // TODO: NoCallback probably unsafe in general static constexpr Attribute::AttrKind CommonFnAttrs[] = { Attribute::MustProgress, Attribute::NoCallback, Attribute::NoFree, Attribute::NoSync, Attribute::NoUnwind, Attribute::WillReturn}; + static constexpr Attribute::AttrKind MemoryFnAttrs[] = { + Attribute::MustProgress, Attribute::NoUnwind, Attribute::WillReturn}; static constexpr Attribute::AttrKind CommonPtrArgAttrs[] = { Attribute::NoAlias, Attribute::WriteOnly, Attribute::NonNull}; @@ -182,6 +192,71 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, return {FunctionType::get(RetTy, {ScalarTy}, false), Attrs}; } + case RTLIB::impl_malloc: + case RTLIB::impl_calloc: { + AttrBuilder FuncAttrBuilder(Ctx); + for (Attribute::AttrKind Attr : MemoryFnAttrs) + FuncAttrBuilder.addAttribute(Attr); + FuncAttrBuilder.addAttribute(Attribute::NoFree); + + AllocFnKind AllocKind = AllocFnKind::Alloc; + if (LibcallImpl == RTLIB::impl_malloc) + AllocKind |= AllocFnKind::Uninitialized; + + // TODO: Set memory attribute + 
FuncAttrBuilder.addAllocKindAttr(AllocKind); + FuncAttrBuilder.addAttribute("alloc-family", "malloc"); + FuncAttrBuilder.addAllocSizeAttr(0, LibcallImpl == RTLIB::impl_malloc + ? std::nullopt + : std::make_optional(1)); + + AttributeList Attrs; + Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder); + + { + AttrBuilder ArgAttrBuilder(Ctx); + for (Attribute::AttrKind AK : CommonPtrArgAttrs) + ArgAttrBuilder.addAttribute(AK); + + Attrs = Attrs.addRetAttribute(Ctx, Attribute::NoUndef); + Attrs = Attrs.addRetAttribute(Ctx, Attribute::NoAlias); + Attrs = Attrs.addParamAttribute(Ctx, 0, Attribute::NoUndef); + if (LibcallImpl == RTLIB::impl_calloc) + Attrs = Attrs.addParamAttribute(Ctx, 1, Attribute::NoUndef); + } + + IntegerType *SizeT = getSizeTType(Ctx, DL); + PointerType *PtrTy = PointerType::get(Ctx, 0); + SmallVector ArgTys = {SizeT}; + if (LibcallImpl == RTLIB::impl_calloc) + ArgTys.push_back(SizeT); + + return {FunctionType::get(PtrTy, ArgTys, false), Attrs}; + } + case RTLIB::impl_free: { + // TODO: Set memory attribute + AttrBuilder FuncAttrBuilder(Ctx); + for (Attribute::AttrKind Attr : MemoryFnAttrs) + FuncAttrBuilder.addAttribute(Attr); + + FuncAttrBuilder.addAllocKindAttr(AllocFnKind::Free); + FuncAttrBuilder.addAttribute("alloc-family", "malloc"); + + AttributeList Attrs; + Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder); + + { + AttrBuilder ArgAttrBuilder(Ctx); + ArgAttrBuilder.addAttribute(Attribute::NoUndef); + ArgAttrBuilder.addAttribute(Attribute::AllocatedPointer); + ArgAttrBuilder.addCapturesAttr(CaptureInfo::none()); + Attrs = Attrs.addParamAttributes(Ctx, 0, ArgAttrBuilder); + } + + return {FunctionType::get(Type::getVoidTy(Ctx), {PointerType::get(Ctx, 0)}, + false), + Attrs}; + } case RTLIB::impl_sqrtf: case RTLIB::impl_sqrt: { AttrBuilder FuncAttrBuilder(Ctx); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 7cc1980d24c33..a1e14d8f25bf7 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -2732,6 
+2732,14 @@ void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) { Check(CPA->getDiscriminator()->getBitWidth() == 64, "signed ptrauth constant discriminator must be i64 constant integer"); + + Check(CPA->getDeactivationSymbol()->getType()->isPointerTy(), + "signed ptrauth constant deactivation symbol must be a pointer"); + + Check(isa(CPA->getDeactivationSymbol()) || + CPA->getDeactivationSymbol()->isNullValue(), + "signed ptrauth constant deactivation symbol must be a global value " + "or null"); } bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) { diff --git a/llvm/lib/MC/MCELFStreamer.cpp b/llvm/lib/MC/MCELFStreamer.cpp index 973e98dd1fa29..8d6f7bf6e642e 100644 --- a/llvm/lib/MC/MCELFStreamer.cpp +++ b/llvm/lib/MC/MCELFStreamer.cpp @@ -364,7 +364,7 @@ void MCELFStreamer::finishImpl() { } finalizeCGProfile(); - emitFrames(nullptr); + emitFrames(); this->MCObjectStreamer::finishImpl(); } diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp index 2b7a248e6d109..cde1d66127f06 100644 --- a/llvm/lib/MC/MCMachOStreamer.cpp +++ b/llvm/lib/MC/MCMachOStreamer.cpp @@ -422,7 +422,7 @@ void MCMachOStreamer::emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, } void MCMachOStreamer::finishImpl() { - emitFrames(&getAssembler().getBackend()); + emitFrames(); // We have to set the fragment atom associations so we can relax properly for // Mach-O. 
diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp index b2f500083f5d8..5afe00eee2242 100644 --- a/llvm/lib/MC/MCObjectFileInfo.cpp +++ b/llvm/lib/MC/MCObjectFileInfo.cpp @@ -61,9 +61,6 @@ static bool useCompactUnwind(const Triple &T) { } void MCObjectFileInfo::initMachOMCObjectFileInfo(const Triple &T) { - // MachO - SupportsWeakOmittedEHFrame = false; - EHFrameSection = Ctx->getMachOSection( "__TEXT", "__eh_frame", MachO::S_COALESCED | MachO::S_ATTR_NO_TOC | @@ -1090,7 +1087,6 @@ void MCObjectFileInfo::initMCObjectFileInfo(MCContext &MCCtx, bool PIC, Ctx = &MCCtx; // Common. - SupportsWeakOmittedEHFrame = true; SupportsCompactUnwindWithoutEHFrame = false; OmitDwarfIfHaveCompactUnwind = false; diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp index 701a0836d2c70..94468140a30b9 100644 --- a/llvm/lib/MC/MCObjectStreamer.cpp +++ b/llvm/lib/MC/MCObjectStreamer.cpp @@ -178,10 +178,11 @@ void MCObjectStreamer::reset() { MCStreamer::reset(); } -void MCObjectStreamer::emitFrames(MCAsmBackend *MAB) { +void MCObjectStreamer::emitFrames() { if (!getNumFrameInfos()) return; + auto *MAB = &getAssembler().getBackend(); if (EmitEHFrame) MCDwarfFrameEmitter::Emit(*this, MAB, true); diff --git a/llvm/lib/MC/MCSymbol.cpp b/llvm/lib/MC/MCSymbol.cpp index b86873824cb00..cf44005139abf 100644 --- a/llvm/lib/MC/MCSymbol.cpp +++ b/llvm/lib/MC/MCSymbol.cpp @@ -84,7 +84,21 @@ void MCSymbol::print(raw_ostream &OS, const MCAsmInfo *MAI) const { } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void MCSymbol::dump() const { - dbgs() << *this; -} +LLVM_DUMP_METHOD void MCSymbol::dump() const { dbgs() << *this; } #endif + +// Determine whether the offset between two labels can change at link time. +// Currently, this function is used only in DWARF info emission logic, where it +// helps generate more optimal debug info when the offset between labels is +// constant at link time. 
+bool llvm::isRangeRelaxable(const MCSymbol *Begin, const MCSymbol *End) { + assert(Begin && "Range without a begin symbol?"); + assert(End && "Range without an end symbol?"); + for (const auto *Fragment = Begin->getFragment(); + Fragment != End->getFragment(); Fragment = Fragment->getNext()) { + assert(Fragment); + if (Fragment->isLinkerRelaxable()) + return true; + } + return false; +} diff --git a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp index ff95ff78fd53a..22494fa11eb2a 100644 --- a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp +++ b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp @@ -24,6 +24,13 @@ using namespace llvm; return *NAME##View; \ } +#define MCSTROPT(NAME) \ + static cl::opt *NAME##View; \ + StringRef llvm::mc::get##NAME() { \ + assert(NAME##View && "RegisterMCTargetOptionsFlags not created."); \ + return *NAME##View; \ + } + #define MCOPT_EXP(TY, NAME) \ MCOPT(TY, NAME) \ std::optional llvm::mc::getExplicit##NAME() { \ @@ -52,8 +59,8 @@ MCOPT(bool, Crel) MCOPT(bool, ImplicitMapSyms) MCOPT(bool, X86RelaxRelocations) MCOPT(bool, X86Sse2Avx) -MCOPT(std::string, ABIName) -MCOPT(std::string, AsSecureLogFile) +MCSTROPT(ABIName) +MCSTROPT(AsSecureLogFile) llvm::mc::RegisterMCTargetOptionsFlags::RegisterMCTargetOptionsFlags() { #define MCBINDOPT(NAME) \ diff --git a/llvm/lib/MC/MCWasmStreamer.cpp b/llvm/lib/MC/MCWasmStreamer.cpp index 070b3d9f8d2c8..1d3cf38d4bfdb 100644 --- a/llvm/lib/MC/MCWasmStreamer.cpp +++ b/llvm/lib/MC/MCWasmStreamer.cpp @@ -147,7 +147,7 @@ void MCWasmStreamer::emitIdent(StringRef IdentString) { } void MCWasmStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); this->MCObjectStreamer::finishImpl(); } diff --git a/llvm/lib/MC/MCWin64EH.cpp b/llvm/lib/MC/MCWin64EH.cpp index 6d146f6cedd6e..a7ce8d527250f 100644 --- a/llvm/lib/MC/MCWin64EH.cpp +++ b/llvm/lib/MC/MCWin64EH.cpp @@ -673,7 +673,7 @@ static void ARM64EmitUnwindCode(MCStreamer &streamer, break; case Win64EH::UOP_SaveFPLRX: b = 
0x80; - b |= ((inst.Offset - 1) >> 3) & 0x3F; + b |= ((inst.Offset >> 3) - 1) & 0x3F; streamer.emitInt8(b); break; case Win64EH::UOP_SaveFPLR: diff --git a/llvm/lib/Object/Archive.cpp b/llvm/lib/Object/Archive.cpp index 861c284253f7a..8e4a5ea5fc612 100644 --- a/llvm/lib/Object/Archive.cpp +++ b/llvm/lib/Object/Archive.cpp @@ -582,7 +582,8 @@ Expected Archive::Child::getBuffer() const { if (!FullNameOrErr) return FullNameOrErr.takeError(); const std::string &FullName = *FullNameOrErr; - ErrorOr> Buf = MemoryBuffer::getFile(FullName); + ErrorOr> Buf = + MemoryBuffer::getFile(FullName, false, /*RequiresNullTerminator=*/false); if (std::error_code EC = Buf.getError()) return errorCodeToError(EC); Parent->ThinBuffers.push_back(std::move(*Buf)); diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 0d190ea448931..f5281ea69b512 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -1590,24 +1590,31 @@ parseBoundsCheckingOptions(StringRef Params) { Options.Rt = { /*MinRuntime=*/false, /*MayReturn=*/true, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "rt-abort") { Options.Rt = { /*MinRuntime=*/false, /*MayReturn=*/false, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "min-rt") { Options.Rt = { /*MinRuntime=*/true, /*MayReturn=*/true, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "min-rt-abort") { Options.Rt = { /*MinRuntime=*/true, /*MayReturn=*/false, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "merge") { Options.Merge = true; + } else if (ParamName == "handler-preserve-all-regs") { + if (Options.Rt) + Options.Rt->HandlerPreserveAllRegs = true; } else { StringRef ParamEQ; StringRef Val; diff --git a/llvm/lib/SandboxIR/Constant.cpp b/llvm/lib/SandboxIR/Constant.cpp index 9de88ef2cf0a0..eb14797af081c 100644 --- a/llvm/lib/SandboxIR/Constant.cpp +++ b/llvm/lib/SandboxIR/Constant.cpp @@ -412,10 +412,12 @@ PointerType *NoCFIValue::getType() const { 
} ConstantPtrAuth *ConstantPtrAuth::get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) { + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) { auto *LLVMC = llvm::ConstantPtrAuth::get( cast(Ptr->Val), cast(Key->Val), - cast(Disc->Val), cast(AddrDisc->Val)); + cast(Disc->Val), cast(AddrDisc->Val), + cast(DeactivationSymbol->Val)); return cast(Ptr->getContext().getOrCreateConstant(LLVMC)); } @@ -439,6 +441,11 @@ Constant *ConstantPtrAuth::getAddrDiscriminator() const { cast(Val)->getAddrDiscriminator()); } +Constant *ConstantPtrAuth::getDeactivationSymbol() const { + return Ctx.getOrCreateConstant( + cast(Val)->getDeactivationSymbol()); +} + ConstantPtrAuth *ConstantPtrAuth::getWithSameSchema(Constant *Pointer) const { auto *LLVMC = cast(Val)->getWithSameSchema( cast(Pointer->Val)); diff --git a/llvm/lib/Support/AllocToken.cpp b/llvm/lib/Support/AllocToken.cpp index daa40d4e9dcc6..cabe52189c4bb 100644 --- a/llvm/lib/Support/AllocToken.cpp +++ b/llvm/lib/Support/AllocToken.cpp @@ -28,6 +28,20 @@ llvm::getAllocTokenModeFromString(StringRef Name) { .Default(std::nullopt); } +StringRef llvm::getAllocTokenModeAsString(AllocTokenMode Mode) { + switch (Mode) { + case AllocTokenMode::Increment: + return "increment"; + case AllocTokenMode::Random: + return "random"; + case AllocTokenMode::TypeHash: + return "typehash"; + case AllocTokenMode::TypeHashPointerSplit: + return "typehashpointersplit"; + } + llvm_unreachable("Unknown AllocTokenMode"); +} + static uint64_t getStableHash(const AllocTokenMetadata &Metadata, uint64_t MaxTokens) { return getStableSipHash(Metadata.TypeName) % MaxTokens; diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td index 0f457c2cab61b..1a4367b84353b 100644 --- a/llvm/lib/Target/AArch64/AArch64.td +++ b/llvm/lib/Target/AArch64/AArch64.td @@ -40,6 +40,8 @@ include "AArch64SchedPredExynos.td" include "AArch64SchedPredNeoverse.td" include "AArch64Combine.td" +defm : 
RemapAllTargetPseudoPointerOperands; + def AArch64InstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp index 5da6181ba36dd..8267414e78955 100644 --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -49,12 +49,14 @@ #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstBuilder.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" #include "llvm/MC/TargetRegistry.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" @@ -95,6 +97,7 @@ class AArch64AsmPrinter : public AsmPrinter { bool EnableImportCallOptimization = false; DenseMap>> SectionToImportedFunctionCalls; + unsigned PAuthIFuncNextUniqueID = 1; public: static char ID; @@ -173,7 +176,12 @@ class AArch64AsmPrinter : public AsmPrinter { const MachineOperand *AUTAddrDisc, Register Scratch, std::optional PACKey, - uint64_t PACDisc, Register PACAddrDisc); + uint64_t PACDisc, Register PACAddrDisc, Value *DS); + + // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true + // if no instruction should be emitted because the deactivation symbol is + // defined in the current module so this function emitted a NOP instead. + bool emitDeactivationSymbolRelocation(Value *DS); // Emit the sequence for PAC. 
void emitPtrauthSign(const MachineInstr *MI); @@ -211,6 +219,10 @@ class AArch64AsmPrinter : public AsmPrinter { // authenticating) void LowerLOADgotAUTH(const MachineInstr &MI); + const MCExpr *emitPAuthRelocationAsIRelative( + const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID, + bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr); + /// tblgen'erated driver function for lowering simple MI->MC /// pseudo instructions. bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst); @@ -2104,11 +2116,31 @@ void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) { LRCheckMethod); } +bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) { + if (!DS) + return false; + + if (isa(DS)) { + // Just emit the nop directly. + EmitToStreamer(MCInstBuilder(AArch64::HINT).addImm(0)); + return true; + } + MCSymbol *Dot = OutContext.createTempSymbol(); + OutStreamer->emitLabel(Dot); + const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext); + + const MCExpr *DSExpr = MCSymbolRefExpr::create( + OutContext.getOrCreateSymbol(DS->getName()), OutContext); + OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr, + SMLoc()); + return false; +} + void AArch64AsmPrinter::emitPtrauthAuthResign( Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc, const MachineOperand *AUTAddrDisc, Register Scratch, std::optional PACKey, uint64_t PACDisc, - Register PACAddrDisc) { + Register PACAddrDisc, Value *DS) { const bool IsAUTPAC = PACKey.has_value(); // We expand AUT/AUTPAC into a sequence of the form @@ -2155,15 +2187,17 @@ void AArch64AsmPrinter::emitPtrauthAuthResign( bool AUTZero = AUTDiscReg == AArch64::XZR; unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero); - // autiza x16 ; if AUTZero - // autia x16, x17 ; if !AUTZero - MCInst AUTInst; - AUTInst.setOpcode(AUTOpc); - AUTInst.addOperand(MCOperand::createReg(AUTVal)); - AUTInst.addOperand(MCOperand::createReg(AUTVal)); - if (!AUTZero) 
- AUTInst.addOperand(MCOperand::createReg(AUTDiscReg)); - EmitToStreamer(*OutStreamer, AUTInst); + if (!emitDeactivationSymbolRelocation(DS)) { + // autiza x16 ; if AUTZero + // autia x16, x17 ; if !AUTZero + MCInst AUTInst; + AUTInst.setOpcode(AUTOpc); + AUTInst.addOperand(MCOperand::createReg(AUTVal)); + AUTInst.addOperand(MCOperand::createReg(AUTVal)); + if (!AUTZero) + AUTInst.addOperand(MCOperand::createReg(AUTDiscReg)); + EmitToStreamer(*OutStreamer, AUTInst); + } // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done. if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap)) @@ -2227,6 +2261,9 @@ void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) { bool IsZeroDisc = DiscReg == AArch64::XZR; unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc); + if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol())) + return; + // paciza x16 ; if IsZeroDisc // pacia x16, x17 ; if !IsZeroDisc MCInst PACInst; @@ -2299,6 +2336,214 @@ void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) { EmitToStreamer(*OutStreamer, BRInst); } +static void emitAddress(MCStreamer &Streamer, MCRegister Reg, + const MCExpr *Expr, bool DSOLocal, + const MCSubtargetInfo &STI) { + MCValue Val; + if (!Expr->evaluateAsRelocatable(Val, nullptr)) + report_fatal_error("emitAddress could not evaluate"); + if (DSOLocal) { + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADRP) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, + Streamer.getContext())), + STI); + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADDXri) + .addReg(Reg) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(Expr, AArch64::S_LO12, + Streamer.getContext())) + .addImm(0), + STI); + } else { + auto *SymRef = + MCSymbolRefExpr::create(Val.getAddSym(), Streamer.getContext()); + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADRP) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(SymRef, AArch64::S_GOT_PAGE, + Streamer.getContext())), + STI); + 
Streamer.emitInstruction( + MCInstBuilder(AArch64::LDRXui) + .addReg(Reg) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(SymRef, AArch64::S_GOT_LO12, + Streamer.getContext())), + STI); + if (Val.getConstant()) + Streamer.emitInstruction(MCInstBuilder(AArch64::ADDXri) + .addReg(Reg) + .addReg(Reg) + .addImm(Val.getConstant()) + .addImm(0), + STI); + } +} + +static bool targetSupportsPAuthRelocation(const Triple &TT, + const MCExpr *Target, + const MCExpr *DSExpr) { + // No released version of glibc supports PAuth relocations. + if (TT.isOSGlibc()) + return false; + + // We emit PAuth constants as IRELATIVE relocations in cases where the + // constant cannot be represented as a PAuth relocation: + // 1) There is a deactivation symbol. + // 2) The signed value is not a symbol. + return !DSExpr && !isa(Target); +} + +static bool targetSupportsIRelativeRelocation(const Triple &TT) { + // IFUNCs are ELF-only. + if (!TT.isOSBinFormatELF()) + return false; + + // musl doesn't support IFUNCs. + if (TT.isMusl()) + return false; + + return true; +} + +// Emit an ifunc resolver that returns a signed pointer to the specified target, +// and return a FUNCINIT reference to the resolver. In the linked binary, this +// function becomes the target of an IRELATIVE relocation. This resolver is used +// to relocate signed pointers in global variable initializers in special cases +// where the standard R_AARCH64_AUTH_ABS64 relocation would not work. 
+// +// Example (signed null pointer, not address discriminated): +// +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// mov x1, #12345 +// b __emupac_pacda +// +// Example (signed null pointer, address discriminated): +// +// .Ltmp: +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// adrp x1, .Ltmp +// add x1, x1, :lo12:.Ltmp +// b __emupac_pacda +// .popsection +// +// Example (signed pointer to symbol, not address discriminated): +// +// .Ltmp: +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// adrp x0, symbol +// add x0, x0, :lo12:symbol +// mov x1, #12345 +// b __emupac_pacda +// .popsection +// +// Example (signed null pointer, not address discriminated, with deactivation +// symbol ds): +// +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// mov x1, #12345 +// .reloc ., R_AARCH64_PATCHINST, ds +// b __emupac_pacda +// ret +// .popsection +const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative( + const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID, + bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) { + const Triple &TT = TM.getTargetTriple(); + + // We only emit an IRELATIVE relocation if the target supports IRELATIVE and + // does not support the kind of PAuth relocation that we are trying to emit. + if (targetSupportsPAuthRelocation(TT, Target, DSExpr) || + !targetSupportsIRelativeRelocation(TT)) + return nullptr; + + // For now, only the DA key is supported. 
+ if (KeyID != AArch64PACKey::DA) + return nullptr; + + std::unique_ptr STI( + TM.getTarget().createMCSubtargetInfo(TT, "", "")); + assert(STI && "Unable to create subtarget info"); + this->STI = static_cast(&*STI); + + MCSymbol *Place = OutStreamer->getContext().createTempSymbol(); + OutStreamer->emitLabel(Place); + OutStreamer->pushSection(); + + OutStreamer->switchSection(OutStreamer->getContext().getELFSection( + ".text.startup", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_EXECINSTR, + 0, "", true, PAuthIFuncNextUniqueID++, nullptr)); + + MCSymbol *IRelativeSym = + OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc"); + OutStreamer->emitLabel(IRelativeSym); + if (isa(Target)) { + OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi) + .addReg(AArch64::X0) + .addExpr(Target) + .addImm(0), + *STI); + } else { + emitAddress(*OutStreamer, AArch64::X0, Target, IsDSOLocal, *STI); + } + if (HasAddressDiversity) { + auto *PlacePlusDisc = MCBinaryExpr::createAdd( + MCSymbolRefExpr::create(Place, OutStreamer->getContext()), + MCConstantExpr::create(static_cast(Disc), + OutStreamer->getContext()), + OutStreamer->getContext()); + emitAddress(*OutStreamer, AArch64::X1, PlacePlusDisc, /*IsDSOLocal=*/true, + *STI); + } else { + emitMOVZ(AArch64::X1, Disc, 0); + } + + if (DSExpr) { + MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol(); + OutStreamer->emitLabel(PrePACInst); + + auto *PrePACInstExpr = + MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext()); + OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST", + DSExpr, SMLoc()); + } + + // We don't know the subtarget because this is being emitted for a global + // initializer. Because the performance of IFUNC resolvers is unimportant, we + // always call the EmuPAC runtime, which will end up using the PAC instruction + // if the target supports PAC. 
+ MCSymbol *EmuPAC = + OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda"); + const MCSymbolRefExpr *EmuPACRef = + MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext()); + OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef), + *STI); + + // We need a RET despite the above tail call because the deactivation symbol + // may replace the tail call with a NOP. + if (DSExpr) + OutStreamer->emitInstruction( + MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI); + OutStreamer->popSection(); + + return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT, + OutStreamer->getContext()); +} + const MCExpr * AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { MCContext &Ctx = OutContext; @@ -2310,22 +2555,26 @@ AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { auto *BaseGVB = dyn_cast(BaseGV); - // If we can't understand the referenced ConstantExpr, there's nothing - // else we can do: emit an error. - if (!BaseGVB) { - BaseGV->getContext().emitError( - "cannot resolve target base/addend of ptrauth constant"); - return nullptr; + const MCExpr *Sym; + if (BaseGVB) { + // If there is an addend, turn that into the appropriate MCExpr. + Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx); + if (Offset.sgt(0)) + Sym = MCBinaryExpr::createAdd( + Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx); + else if (Offset.slt(0)) + Sym = MCBinaryExpr::createSub( + Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx); + } else { + Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx); } - // If there is an addend, turn that into the appropriate MCExpr. 
- const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx); - if (Offset.sgt(0)) - Sym = MCBinaryExpr::createAdd( - Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx); - else if (Offset.slt(0)) - Sym = MCBinaryExpr::createSub( - Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx); + const MCExpr *DSExpr = nullptr; + if (auto *DS = dyn_cast(CPA.getDeactivationSymbol())) { + if (isa(DS)) + return Sym; + DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx); + } uint64_t KeyID = CPA.getKey()->getZExtValue(); // We later rely on valid KeyID value in AArch64PACKeyIDToString call from @@ -2344,6 +2593,16 @@ AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { Disc = 0; } + // Check if we need to represent this with an IRELATIVE and emit it if so. + if (auto *IFuncSym = emitPAuthRelocationAsIRelative( + Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(), + BaseGVB && BaseGVB->isDSOLocal(), DSExpr)) + return IFuncSym; + + if (DSExpr) + report_fatal_error("deactivation symbols unsupported in constant " + "expressions on this target"); + // Finally build the complete @AUTH expr. 
return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(), Ctx); @@ -2948,17 +3207,18 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { } case AArch64::AUTx16x17: - emitPtrauthAuthResign(AArch64::X16, - (AArch64PACKey::ID)MI->getOperand(0).getImm(), - MI->getOperand(1).getImm(), &MI->getOperand(2), - AArch64::X17, std::nullopt, 0, 0); + emitPtrauthAuthResign( + AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(), + MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17, + std::nullopt, 0, 0, MI->getDeactivationSymbol()); return; case AArch64::AUTxMxN: emitPtrauthAuthResign(MI->getOperand(0).getReg(), (AArch64PACKey::ID)MI->getOperand(3).getImm(), MI->getOperand(4).getImm(), &MI->getOperand(5), - MI->getOperand(1).getReg(), std::nullopt, 0, 0); + MI->getOperand(1).getReg(), std::nullopt, 0, 0, + MI->getDeactivationSymbol()); return; case AArch64::AUTPAC: @@ -2966,7 +3226,8 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(), MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17, (AArch64PACKey::ID)MI->getOperand(3).getImm(), - MI->getOperand(4).getImm(), MI->getOperand(5).getReg()); + MI->getOperand(4).getImm(), MI->getOperand(5).getReg(), + MI->getDeactivationSymbol()); return; case AArch64::PAC: @@ -3447,6 +3708,9 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { return; } + if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol())) + return; + // Finally, do the automated lowerings for everything else. 
MCInst TmpInst; MCInstLowering.Lower(MI, TmpInst); diff --git a/llvm/lib/Target/AArch64/AArch64Features.td b/llvm/lib/Target/AArch64/AArch64Features.td index 72ff8613f01e7..066724bea92c9 100644 --- a/llvm/lib/Target/AArch64/AArch64Features.td +++ b/llvm/lib/Target/AArch64/AArch64Features.td @@ -894,6 +894,11 @@ def FeatureUseFixedOverScalableIfEqualCost : SubtargetFeature<"use-fixed-over-sc "UseFixedOverScalableIfEqualCost", "true", "Prefer fixed width loop vectorization over scalable if the cost-model assigns equal costs">; +def FeatureDisableMaximizeScalableBandwidth : SubtargetFeature< "disable-maximize-scalable-bandwidth", + "DisableMaximizeScalableBandwidth", "true", + "Determine the maximum scalable vector length for a loop by the " + "largest scalar type rather than the smallest">; + // For performance reasons we prefer to use ldapr to ldapur on certain cores. def FeatureAvoidLDAPUR : SubtargetFeature<"avoid-ldapur", "AvoidLDAPUR", "true", "Prefer add+ldapr to offset ldapur">; diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 08466667c0fa5..b721c1f533726 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1557,7 +1557,10 @@ void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) { extractPtrauthBlendDiscriminators(AUTDisc, CurDAG); if (!Subtarget->isX16X17Safer()) { - SDValue Ops[] = {Val, AUTKey, AUTConstDisc, AUTAddrDisc}; + std::vector Ops = {Val, AUTKey, AUTConstDisc, AUTAddrDisc}; + // Copy deactivation symbol if present. 
+ if (N->getNumOperands() > 4) + Ops.push_back(N->getOperand(4)); SDNode *AUT = CurDAG->getMachineNode(AArch64::AUTxMxN, DL, MVT::i64, MVT::i64, Ops); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 83ce39fa314d1..6072fd9d8f242 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1783,9 +1783,13 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom); setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom); setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); + } - if (Subtarget->hasSVEB16B16() && - Subtarget->isNonStreamingSVEorSME2Available()) { + if (Subtarget->hasSVEB16B16() && + Subtarget->isNonStreamingSVEorSME2Available()) { + // Note: Use SVE for bfloat16 operations when +sve-b16b16 is available. + for (auto VT : {MVT::v4bf16, MVT::v8bf16, MVT::nxv2bf16, MVT::nxv4bf16, + MVT::nxv8bf16}) { setOperationAction(ISD::FADD, VT, Custom); setOperationAction(ISD::FMA, VT, Custom); setOperationAction(ISD::FMAXIMUM, VT, Custom); @@ -1983,10 +1987,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); // We can lower types that have elements to compact. - for (auto VT : - {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv2f32, - MVT::nxv2f64, MVT::nxv4i8, MVT::nxv4i16, MVT::nxv4i32, MVT::nxv4f32}) + for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, + MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i8, MVT::nxv4i16, + MVT::nxv4i32, MVT::nxv4f32}) { setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom); + // Use a custom lowering for masked stores that could be a supported + // compressing store. Note: These types still use the normal (Legal) + // lowering for non-compressing masked stores. 
+ setOperationAction(ISD::MSTORE, VT, Custom); + } // If we have SVE, we can use SVE logic for legal (or smaller than legal) // NEON vectors in the lowest bits of the SVE register. @@ -5795,8 +5804,10 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { if (VT.is64BitVector()) { if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR && isNullConstant(N0.getOperand(1)) && + N0.getOperand(0).getValueType().is128BitVector() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && - isNullConstant(N1.getOperand(1))) { + isNullConstant(N1.getOperand(1)) && + N1.getOperand(0).getValueType().is128BitVector()) { N0 = N0.getOperand(0); N1 = N1.getOperand(0); VT = N0.getValueType(); @@ -6442,9 +6453,6 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, case Intrinsic::aarch64_sve_lastb: return DAG.getNode(AArch64ISD::LASTB, DL, Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); - case Intrinsic::aarch64_sve_rev: - return DAG.getNode(ISD::VECTOR_REVERSE, DL, Op.getValueType(), - Op.getOperand(1)); case Intrinsic::aarch64_sve_tbl: return DAG.getNode(AArch64ISD::TBL, DL, Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); @@ -7932,7 +7940,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op, case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::MSTORE: - return LowerFixedLengthVectorMStoreToSVE(Op, DAG); + return LowerMSTORE(Op, DAG); case ISD::MGATHER: return LowerMGATHER(Op, DAG); case ISD::MSCATTER: @@ -10203,6 +10211,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, if (InGlue.getNode()) Ops.push_back(InGlue); + + if (CLI.DeactivationSymbol) + Ops.push_back(DAG.getDeactivationSymbol(CLI.DeactivationSymbol)); + // If we're doing a tail call, use a TC_RETURN here rather than an // actual call instruction. 
if (IsTailCall) { @@ -11735,7 +11746,12 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { } if (LHS.getValueType().isInteger()) { - + if (Subtarget->hasCSSC() && CC == ISD::SETNE && isNullConstant(RHS)) { + SDValue One = DAG.getConstant(1, DL, LHS.getValueType()); + SDValue UMin = DAG.getNode(ISD::UMIN, DL, LHS.getValueType(), LHS, One); + SDValue Res = DAG.getZExtOrTrunc(UMin, DL, VT); + return IsStrict ? DAG.getMergeValues({Res, Chain}, DL) : Res; + } simplifySetCCIntoEq(CC, LHS, RHS, DAG, DL); SDValue CCVal; @@ -14802,9 +14818,11 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, } unsigned WhichResult; - if (isZIPMask(ShuffleMask, NumElts, WhichResult)) { + unsigned OperandOrder; + if (isZIPMask(ShuffleMask, NumElts, WhichResult, OperandOrder)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; - return DAG.getNode(Opc, DL, V1.getValueType(), V1, V2); + return DAG.getNode(Opc, DL, V1.getValueType(), OperandOrder == 0 ? V1 : V2, + OperandOrder == 0 ? V2 : V1); } if (isUZPMask(ShuffleMask, NumElts, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; @@ -16526,7 +16544,7 @@ bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef M, EVT VT) const { isSingletonEXTMask(M, VT, DummyUnsigned) || isTRNMask(M, NumElts, DummyUnsigned) || isUZPMask(M, NumElts, DummyUnsigned) || - isZIPMask(M, NumElts, DummyUnsigned) || + isZIPMask(M, NumElts, DummyUnsigned, DummyUnsigned) || isTRN_v_undef_Mask(M, VT, DummyUnsigned) || isUZP_v_undef_Mask(M, VT, DummyUnsigned) || isZIP_v_undef_Mask(M, VT, DummyUnsigned) || @@ -22586,6 +22604,38 @@ static SDValue performSubWithBorrowCombine(SDNode *N, SelectionDAG &DAG) { Flags); } +// add(trunc(ashr(A, C)), trunc(lshr(A, BW-1))), with C >= BW +// -> +// X = trunc(ashr(A, C)); add(x, lshr(X, BW-1) +// The original converts into ashr+lshr+xtn+xtn+add. The second becomes +// ashr+xtn+usra. 
The first form has less total latency due to more parallelism, +// but more micro-ops and seems to be slower in practice. +static SDValue performAddTruncShiftCombine(SDNode *N, SelectionDAG &DAG) { + using namespace llvm::SDPatternMatch; + EVT VT = N->getValueType(0); + if (VT != MVT::v2i32 && VT != MVT::v4i16 && VT != MVT::v8i8) + return SDValue(); + + SDValue AShr, LShr; + if (!sd_match(N, m_Add(m_Trunc(m_Value(AShr)), m_Trunc(m_Value(LShr))))) + return SDValue(); + if (AShr.getOpcode() != AArch64ISD::VASHR) + std::swap(AShr, LShr); + if (AShr.getOpcode() != AArch64ISD::VASHR || + LShr.getOpcode() != AArch64ISD::VLSHR || + AShr.getOperand(0) != LShr.getOperand(0) || + AShr.getConstantOperandVal(1) < VT.getScalarSizeInBits() || + LShr.getConstantOperandVal(1) != VT.getScalarSizeInBits() * 2 - 1) + return SDValue(); + + SDLoc DL(N); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, AShr); + SDValue Shift = DAG.getNode( + AArch64ISD::VLSHR, DL, VT, Trunc, + DAG.getTargetConstant(VT.getScalarSizeInBits() - 1, DL, MVT::i32)); + return DAG.getNode(ISD::ADD, DL, VT, Trunc, Shift); +} + static SDValue performAddSubCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // Try to change sum of two reductions. 
@@ -22609,6 +22659,8 @@ static SDValue performAddSubCombine(SDNode *N, return Val; if (SDValue Val = performSubWithBorrowCombine(N, DCI.DAG)) return Val; + if (SDValue Val = performAddTruncShiftCombine(N, DCI.DAG)) + return Val; if (SDValue Val = performExtBinopLoadFold(N, DCI.DAG)) return Val; @@ -30391,6 +30443,43 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE( Store->isTruncatingStore()); } +SDValue AArch64TargetLowering::LowerMSTORE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + auto *Store = cast(Op); + EVT VT = Store->getValue().getValueType(); + if (VT.isFixedLengthVector()) + return LowerFixedLengthVectorMStoreToSVE(Op, DAG); + + if (!Store->isCompressingStore()) + return SDValue(); + + EVT MaskVT = Store->getMask().getValueType(); + EVT MaskExtVT = getPromotedVTForPredicate(MaskVT); + EVT MaskReduceVT = MaskExtVT.getScalarType(); + SDValue Zero = DAG.getConstant(0, DL, MVT::i64); + + SDValue MaskExt = + DAG.getNode(ISD::ZERO_EXTEND, DL, MaskExtVT, Store->getMask()); + SDValue CntActive = + DAG.getNode(ISD::VECREDUCE_ADD, DL, MaskReduceVT, MaskExt); + if (MaskReduceVT != MVT::i64) + CntActive = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CntActive); + + SDValue CompressedValue = + DAG.getNode(ISD::VECTOR_COMPRESS, DL, VT, Store->getValue(), + Store->getMask(), DAG.getPOISON(VT)); + SDValue CompressedMask = + DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, MaskVT, Zero, CntActive); + + return DAG.getMaskedStore(Store->getChain(), DL, CompressedValue, + Store->getBasePtr(), Store->getOffset(), + CompressedMask, Store->getMemoryVT(), + Store->getMemOperand(), Store->getAddressingMode(), + Store->isTruncatingStore(), + /*isCompressing=*/false); +} + SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE( SDValue Op, SelectionDAG &DAG) const { auto *Store = cast(Op); @@ -30405,7 +30494,8 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE( return DAG.getMaskedStore( Store->getChain(), DL, NewValue, 
Store->getBasePtr(), Store->getOffset(), Mask, Store->getMemoryVT(), Store->getMemOperand(), - Store->getAddressingMode(), Store->isTruncatingStore()); + Store->getAddressingMode(), Store->isTruncatingStore(), + Store->isCompressingStore()); } SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE( @@ -31539,10 +31629,15 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE( } unsigned WhichResult; - if (isZIPMask(ShuffleMask, VT.getVectorNumElements(), WhichResult) && + unsigned OperandOrder; + if (isZIPMask(ShuffleMask, VT.getVectorNumElements(), WhichResult, + OperandOrder) && WhichResult == 0) return convertFromScalableVector( - DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2)); + DAG, VT, + DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, + OperandOrder == 0 ? Op1 : Op2, + OperandOrder == 0 ? Op2 : Op1)); if (isTRNMask(ShuffleMask, VT.getVectorNumElements(), WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; @@ -31587,10 +31682,14 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE( return convertFromScalableVector(DAG, VT, Op); } - if (isZIPMask(ShuffleMask, VT.getVectorNumElements(), WhichResult) && + if (isZIPMask(ShuffleMask, VT.getVectorNumElements(), WhichResult, + OperandOrder) && WhichResult != 0) return convertFromScalableVector( - DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2)); + DAG, VT, + DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, + OperandOrder == 0 ? Op1 : Op2, + OperandOrder == 0 ? Op2 : Op1)); if (isUZPMask(ShuffleMask, VT.getVectorNumElements(), WhichResult)) { unsigned Opc = (WhichResult == 0) ? 
AArch64ISD::UZP1 : AArch64ISD::UZP2; diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index ca08eb40c956a..32aa913181a21 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -761,6 +761,7 @@ class AArch64TargetLowering : public TargetLowering { SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerMSTORE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const; diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td index 6871c2d504cf6..61a8f764e39ed 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td +++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td @@ -2347,6 +2347,7 @@ class BImm pattern> let Inst{25-0} = addr; let DecoderMethod = "DecodeUnconditionalBranch"; + let supportsDeactivationSymbol = true; } class BranchImm pattern> @@ -2404,6 +2405,7 @@ class SignAuthOneData opcode_prefix, bits<2> opcode, string asm, let Inst{11-10} = opcode; let Inst{9-5} = Rn; let Inst{4-0} = Rd; + let supportsDeactivationSymbol = true; } class SignAuthZero opcode_prefix, bits<2> opcode, string asm, @@ -2417,6 +2419,7 @@ class SignAuthZero opcode_prefix, bits<2> opcode, string asm, let Inst{11-10} = opcode; let Inst{9-5} = 0b11111; let Inst{4-0} = Rd; + let supportsDeactivationSymbol = true; } class SignAuthTwoOperand opc, string asm, diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td b/llvm/lib/Target/AArch64/AArch64InstrGISel.td index 52b216c7fe0f0..7d99786830e3d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td +++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td @@ -149,6 +149,13 @@ def G_VLSHR : AArch64GenericInstruction { let hasSideEffects = 0; } +// 
Float truncation using round to odd +def G_FPTRUNC_ODD : AArch64GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$src); + let hasSideEffects = false; +} + // Represents an integer to FP conversion on the FPR bank. def G_SITOF : AArch64GenericInstruction { let OutOperandList = (outs type0:$dst); @@ -297,6 +304,8 @@ def : GINodeEquiv; def : GINodeEquiv; +def : GINodeEquiv; + // These are patterns that we only use for GlobalISel via the importer. def : Pat<(f32 (fadd (vector_extract (v2f32 FPR64:$Rn), (i64 0)), (vector_extract (v2f32 FPR64:$Rn), (i64 1)))), diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 03bad8ff8ac8a..da93a2b13fc11 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -645,29 +645,34 @@ def nontrunc_masked_store : (masked_st node:$val, node:$ptr, undef, node:$pred), [{ return !cast(N)->isTruncatingStore() && cast(N)->isUnindexed() && - !cast(N)->isNonTemporal(); + !cast(N)->isNonTemporal() && + !cast(N)->isCompressingStore(); }]>; // truncating masked store fragments. 
def trunc_masked_store : PatFrag<(ops node:$val, node:$ptr, node:$pred), (masked_st node:$val, node:$ptr, undef, node:$pred), [{ return cast(N)->isTruncatingStore() && - cast(N)->isUnindexed(); + cast(N)->isUnindexed() && + !cast(N)->isCompressingStore(); }]>; def trunc_masked_store_i8 : PatFrag<(ops node:$val, node:$ptr, node:$pred), (trunc_masked_store node:$val, node:$ptr, node:$pred), [{ - return cast(N)->getMemoryVT().getScalarType() == MVT::i8; + return cast(N)->getMemoryVT().getScalarType() == MVT::i8 && + !cast(N)->isCompressingStore(); }]>; def trunc_masked_store_i16 : PatFrag<(ops node:$val, node:$ptr, node:$pred), (trunc_masked_store node:$val, node:$ptr, node:$pred), [{ - return cast(N)->getMemoryVT().getScalarType() == MVT::i16; + return cast(N)->getMemoryVT().getScalarType() == MVT::i16 && + !cast(N)->isCompressingStore(); }]>; def trunc_masked_store_i32 : PatFrag<(ops node:$val, node:$ptr, node:$pred), (trunc_masked_store node:$val, node:$ptr, node:$pred), [{ - return cast(N)->getMemoryVT().getScalarType() == MVT::i32; + return cast(N)->getMemoryVT().getScalarType() == MVT::i32 && + !cast(N)->isCompressingStore(); }]>; def non_temporal_store : @@ -675,7 +680,8 @@ def non_temporal_store : (masked_st node:$val, node:$ptr, undef, node:$pred), [{ return !cast(N)->isTruncatingStore() && cast(N)->isUnindexed() && - cast(N)->isNonTemporal(); + cast(N)->isNonTemporal() && + !cast(N)->isCompressingStore(); }]>; multiclass masked_gather_scatter { @@ -2215,6 +2221,7 @@ let Predicates = [HasPAuth] in { let Size = 12; let Defs = [X16, X17]; let usesCustomInserter = 1; + let supportsDeactivationSymbol = true; } // A standalone pattern is used, so that literal 0 can be passed as $Disc. 
diff --git a/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h b/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h index c28cbf2bc63c2..ef8786d0ad0e1 100644 --- a/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h +++ b/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h @@ -6622,35 +6622,52 @@ inline unsigned getPerfectShuffleCost(llvm::ArrayRef M) { } /// Return true for zip1 or zip2 masks of the form: -/// <0, 8, 1, 9, 2, 10, 3, 11> or -/// <4, 12, 5, 13, 6, 14, 7, 15> +/// <0, 8, 1, 9, 2, 10, 3, 11> (WhichResultOut = 0, OperandOrderOut = 0) or +/// <4, 12, 5, 13, 6, 14, 7, 15> (WhichResultOut = 1, OperandOrderOut = 0) or +/// <8, 0, 9, 1, 10, 2, 11, 3> (WhichResultOut = 0, OperandOrderOut = 1) or +/// <12, 4, 13, 5, 14, 6, 15, 7> (WhichResultOut = 1, OperandOrderOut = 1) inline bool isZIPMask(ArrayRef M, unsigned NumElts, - unsigned &WhichResultOut) { + unsigned &WhichResultOut, unsigned &OperandOrderOut) { if (NumElts % 2 != 0) return false; - // Check the first non-undef element for which half to use. - unsigned WhichResult = 2; - for (unsigned i = 0; i != NumElts / 2; i++) { - if (M[i * 2] >= 0) { - WhichResult = ((unsigned)M[i * 2] == i ? 0 : 1); - break; - } else if (M[i * 2 + 1] >= 0) { - WhichResult = ((unsigned)M[i * 2 + 1] == NumElts + i ? 0 : 1); - break; - } - } - if (WhichResult == 2) - return false; + // "Variant" refers to the distinction between zip1 and zip2, while + // "Order" refers to the sequence of input registers (matching vs flipped). + bool Variant0Order0 = true; // WhichResultOut = 0, OperandOrderOut = 0 + bool Variant1Order0 = true; // WhichResultOut = 1, OperandOrderOut = 0 + bool Variant0Order1 = true; // WhichResultOut = 0, OperandOrderOut = 1 + bool Variant1Order1 = true; // WhichResultOut = 1, OperandOrderOut = 1 // Check all elements match. 
- unsigned Idx = WhichResult * NumElts / 2; for (unsigned i = 0; i != NumElts; i += 2) { - if ((M[i] >= 0 && (unsigned)M[i] != Idx) || - (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts)) - return false; - Idx += 1; + if (M[i] >= 0) { + unsigned EvenElt = (unsigned)M[i]; + if (EvenElt != i / 2) + Variant0Order0 = false; + if (EvenElt != NumElts / 2 + i / 2) + Variant1Order0 = false; + if (EvenElt != NumElts + i / 2) + Variant0Order1 = false; + if (EvenElt != NumElts + NumElts / 2 + i / 2) + Variant1Order1 = false; + } + if (M[i + 1] >= 0) { + unsigned OddElt = (unsigned)M[i + 1]; + if (OddElt != NumElts + i / 2) + Variant0Order0 = false; + if (OddElt != NumElts + NumElts / 2 + i / 2) + Variant1Order0 = false; + if (OddElt != i / 2) + Variant0Order1 = false; + if (OddElt != NumElts / 2 + i / 2) + Variant1Order1 = false; + } } - WhichResultOut = WhichResult; + + if (Variant0Order0 + Variant1Order0 + Variant0Order1 + Variant1Order1 != 1) + return false; + + WhichResultOut = (Variant0Order0 || Variant0Order1) ? 0 : 1; + OperandOrderOut = (Variant0Order0 || Variant1Order0) ? 
0 : 1; return true; } diff --git a/llvm/lib/Target/AArch64/AArch64Processors.td b/llvm/lib/Target/AArch64/AArch64Processors.td index 11387bb97d29c..120415f91c9ae 100644 --- a/llvm/lib/Target/AArch64/AArch64Processors.td +++ b/llvm/lib/Target/AArch64/AArch64Processors.td @@ -593,6 +593,7 @@ def TuneNeoverseN2 : SubtargetFeature<"neoversen2", "ARMProcFamily", "NeoverseN2 FeatureALULSLFast, FeaturePostRAScheduler, FeatureEnableSelectOptimize, + FeatureDisableMaximizeScalableBandwidth, FeaturePredictableSelectIsExpensive]>; def TuneNeoverseN3 : SubtargetFeature<"neoversen3", "ARMProcFamily", "NeoverseN3", @@ -626,6 +627,7 @@ def TuneNeoverseV1 : SubtargetFeature<"neoversev1", "ARMProcFamily", "NeoverseV1 FeaturePostRAScheduler, FeatureEnableSelectOptimize, FeaturePredictableSelectIsExpensive, + FeatureDisableMaximizeScalableBandwidth, FeatureNoSVEFPLD1R]>; def TuneNeoverseV2 : SubtargetFeature<"neoversev2", "ARMProcFamily", "NeoverseV2", diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td index 0b65a5f6b1e25..22e6d1107a337 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td @@ -49,6 +49,12 @@ def N3UnitM : ProcResGroup<[N3UnitM0, N3UnitM1]>; def N3UnitL : ProcResGroup<[N3UnitL01, N3UnitL2]>; def N3UnitI : ProcResGroup<[N3UnitS, N3UnitM0, N3UnitM1]>; +// Group required for modelling SVE gather loads throughput +def N3UnitVL : ProcResGroup<[N3UnitL01, N3UnitV0, N3UnitV1]>; +// Unused group to fix: "error: proc resource group overlaps with N3UnitVL but +// no supergroup contains both." 
+def : ProcResGroup<[N3UnitL01, N3UnitL2, N3UnitV0, N3UnitV1]>; + //===----------------------------------------------------------------------===// def : ReadAdvance; @@ -321,6 +327,12 @@ def N3Write_6c_2I_2L : SchedWriteRes<[N3UnitI, N3UnitI, N3UnitL, N3UnitL]> { let NumMicroOps = 4; } +def N3Write_6c_2L01_2V : SchedWriteRes<[N3UnitVL]> { + let Latency = 6; + let NumMicroOps = 4; + let ReleaseAtCycles = [5]; +} + def N3Write_6c_4V0 : SchedWriteRes<[N3UnitV0, N3UnitV0, N3UnitV0, N3UnitV0]> { let Latency = 6; let NumMicroOps = 4; @@ -1097,7 +1109,7 @@ def : SchedAlias; // ASIMD shift accumulate def : InstRW<[N3Wr_ADA, N3Rd_ADA], (instregex "^[SU]ABAL?v", "^[SU]ADALPv", - "^[SU]R?SRAv")>; + "^[SU]R?SRA(v|d)")>; // ASIMD arith, reduce, 4H/4S def : InstRW<[N3Write_3c_1V1], (instregex "^[SU]?ADDL?Vv4i(16|32)v$")>; @@ -1138,30 +1150,30 @@ def : InstRW<[N3Wr_VMAH, N3Rd_VMAH], (instregex "^SQRDMLAHv", "^SQRDMLSHv")>; def : InstRW<[N3Wr_VMAL, N3Rd_VMAL], (instregex "^[SU]MLALv", "^[SU]MLSLv")>; // ASIMD multiply accumulate saturating long -def : InstRW<[N3Wr_VMASL, N3Rd_VMASL], (instregex "^SQDMLALv", "^SQDMLSLv")>; +def : InstRW<[N3Wr_VMASL, N3Rd_VMASL], (instregex "^SQDMLAL(v|i16|i32)", "^SQDMLSL(v|i16|i32)")>; // ASIMD multiply/multiply long (8x8) polynomial, D-form // ASIMD multiply/multiply long (8x8) polynomial, Q-form def : InstRW<[N3Write_2c_1V0], (instregex "^PMULL?(v8i8|v16i8)$")>; // ASIMD multiply long -def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]MULLv", "^SQDMULLv")>; +def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]MULLv", "^SQDMULL(v|i16|i32)")>; // ASIMD shift by immed, basic -def : InstRW<[N3Write_2c_1V1], (instregex "^SHLv", "^SHLLv", "^SHRNv", - "^SSHLLv", "^SSHRv", "^USHLLv", - "^USHRv")>; +def : InstRW<[N3Write_2c_1V1], (instregex "^SHL(v|d)", "^SHLLv", "^SHRNv", + "^SSHLLv", "^SSHR(v|d)", "^USHLLv", + "^USHR(v|d)")>; // ASIMD shift by immed and insert, basic -def : InstRW<[N3Write_2c_1V1], (instregex "^SLIv", "^SRIv")>; +def : 
InstRW<[N3Write_2c_1V1], (instregex "^SLI(v|d)", "^SRI(v|d)")>; // ASIMD shift by immed, complex def : InstRW<[N3Write_4c_1V1], - (instregex "^RSHRNv", "^SQRSHRNv", "^SQRSHRUNv", + (instregex "^RSHRNv", "^SQRSHRN[vbhs]", "^SQRSHRUN[vbhs]", "^(SQSHLU?|UQSHL)[bhsd]$", "^(SQSHLU?|UQSHL)(v8i8|v16i8|v4i16|v8i16|v2i32|v4i32|v2i64)_shift$", - "^SQSHRNv", "^SQSHRUNv", "^SRSHRv", "^UQRSHRNv", - "^UQSHRNv", "^URSHRv")>; + "^SQSHRN[vbhs]", "^SQSHRUN[vbhs]", "^SRSHR(v|d)", + "^UQRSHRN[vbhs]", "^UQSHRN[vbhs]","^URSHR(v|d)")>; // ASIMD shift by register, basic def : InstRW<[N3Write_2c_1V1], (instregex "^[SU]SHLv")>; @@ -1197,16 +1209,16 @@ def : InstRW<[N3Write_3c_1V0], (instregex "^FCVTL(v2|v4)i32")>; def : InstRW<[N3Write_4c_2V0], (instregex "^FCVTN(v4|v8)i16")>; // ASIMD FP convert, narrow (F64 to F32) -def : InstRW<[N3Write_3c_1V0], (instregex "^FCVTN(v2|v4)i32", +def : InstRW<[N3Write_3c_1V0], (instregex "^FCVTN(v2|v4)i32", "^FCVTXNv1i64", "^FCVTXN(v2|v4)f32")>; // ASIMD FP convert, other, D-form F32 and Q-form F64 -def : InstRW<[N3Write_3c_1V0], (instregex "^[FSU]CVT[AMNPZ][SU]v2f(32|64)$", - "^[SU]CVTFv2f(32|64)$")>; +def : InstRW<[N3Write_3c_1V0], (instregex "^[FSU]CVT[AMNPZ][SU](v2f(32|64)|s|d|v1i32|v1i64|v2i32_shift|v2i64_shift)$", + "^[SU]CVTF(v2f(32|64)|s|d|v1i32|v1i64|v2i32_shift|v2i64_shift)$")>; // ASIMD FP convert, other, D-form F16 and Q-form F32 -def : InstRW<[N3Write_4c_2V0], (instregex "^[FSU]CVT[AMNPZ][SU]v4f(16|32)$", - "^[SU]CVTFv4f(16|32)$")>; +def : InstRW<[N3Write_4c_2V0], (instregex "^[FSU]CVT[AMNPZ][SU](v4f(16|32)|v4i(16|32)_shift)$", + "^[SU]CVTF(v4f(16|32)|v4i(16|32)_shift)$")>; // ASIMD FP convert, other, Q-form F16 def : InstRW<[N3Write_6c_4V0], (instregex "^[FSU]CVT[AMNPZ][SU]v8f16$", @@ -1241,7 +1253,7 @@ def : InstRW<[N3Write_4c_2V], (instregex "^(FMAX|FMIN)(NM)?Vv4(i16|i32)v$")>; def : InstRW<[N3Write_6c_3V], (instregex "^(FMAX|FMIN)(NM)?Vv8i16v$")>; // ASIMD FP multiply -def : InstRW<[N3Wr_FPM], (instregex "^FMULv", "^FMULXv")>; +def : 
InstRW<[N3Wr_FPM], (instregex "^FMULv", "^FMULX(v|32|64)")>; // ASIMD FP multiply accumulate def : InstRW<[N3Wr_FPMA, N3Rd_FPMA], (instregex "^FMLAv", "^FMLSv")>; @@ -1330,9 +1342,9 @@ def : InstRW<[N3Write_4c_2V0], (instrs URECPEv4i32, URSQRTEv4i32)>; // ASIMD reciprocal and square root estimate, D-form F32 and scalar forms def : InstRW<[N3Write_3c_1V0], (instrs FRECPEv1f16, FRECPEv1i32, - FRECPEv1i64, FRECPEv2f32, + FRECPEv1i64, FRECPEv2f32, FRECPEv2f64, FRSQRTEv1f16, FRSQRTEv1i32, - FRSQRTEv1i64, FRSQRTEv2f32)>; + FRSQRTEv1i64, FRSQRTEv2f32, FRSQRTEv2f64)>; // ASIMD reciprocal and square root estimate, D-form F16 and Q-form F32 def : InstRW<[N3Write_4c_2V0], (instrs FRECPEv4f16, FRECPEv4f32, @@ -1345,7 +1357,7 @@ def : InstRW<[N3Write_6c_4V0], (instrs FRECPEv8f16, FRSQRTEv8f16)>; def : InstRW<[N3Write_3c_1V0], (instregex "^FRECPXv")>; // ASIMD reciprocal step -def : InstRW<[N3Write_4c_1V], (instregex "^FRECPSv", "^FRSQRTSv")>; +def : InstRW<[N3Write_4c_1V], (instregex "^FRECPS(v|32|64)", "^FRSQRTS(v|32|64)")>; // ASIMD table lookup, 3 table regs def : InstRW<[N3Write_4c_2V], (instrs TBLv8i8Three, TBLv16i8Three)>; @@ -2270,8 +2282,8 @@ def : InstRW<[N3Write_7c_4L], (instregex "^LDNT1[BHW]_ZZR_S$", "^LDNT1S[BH]_ZZR_S$")>; // Non temporal gather load, vector + scalar 64-bit element size -def : InstRW<[N3Write_6c_2L], (instregex "^LDNT1S?[BHW]_ZZR_D$")>; -def : InstRW<[N3Write_6c_2L], (instrs LDNT1D_ZZR_D)>; +def : InstRW<[N3Write_6c_2L01_2V], (instregex "^LDNT1S?[BHW]_ZZR_D$")>; +def : InstRW<[N3Write_6c_2L01_2V], (instrs LDNT1D_ZZR_D)>; // Contiguous first faulting load, scalar + scalar def : InstRW<[N3Write_6c_1L], (instregex "^LDFF1[BHWD]$", @@ -2320,11 +2332,11 @@ def : InstRW<[N3Write_7c_4L], (instregex "^GLD(FF)?1S?[BH]_S_IMM$", "^GLD(FF)?1W_IMM$")>; // Gather load, vector + imm, 64-bit element size -def : InstRW<[N3Write_6c_2L], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$", +def : InstRW<[N3Write_6c_2L01_2V], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$", 
"^GLD(FF)?1D_IMM$")>; // Gather load, 64-bit element size -def : InstRW<[N3Write_6c_2L], +def : InstRW<[N3Write_6c_2L01_2V], (instregex "^GLD(FF)?1S?[BHW]_D_[SU]XTW(_SCALED)?$", "^GLD(FF)?1S?[BHW]_D(_SCALED)?$", "^GLD(FF)?1D_[SU]XTW(_SCALED)?$", diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 0bae00bafee3c..3a5f1499f9d2d 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -375,8 +375,13 @@ AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call, bool AArch64TTIImpl::shouldMaximizeVectorBandwidth( TargetTransformInfo::RegisterKind K) const { assert(K != TargetTransformInfo::RGK_Scalar); - return (K == TargetTransformInfo::RGK_FixedWidthVector && - ST->isNeonAvailable()); + + if (K == TargetTransformInfo::RGK_FixedWidthVector && ST->isNeonAvailable()) + return true; + + return K == TargetTransformInfo::RGK_ScalableVector && + ST->isSVEorStreamingSVEAvailable() && + !ST->disableMaximizeScalableBandwidth(); } /// Calculate the cost of materializing a 64-bit value. This helper @@ -921,8 +926,20 @@ AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, if (ICA.getArgs().empty()) break; - // TODO: Add handling for fshl where third argument is not a constant. const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(ICA.getArgs()[2]); + + // ROTR / ROTL is a funnel shift with equal first and second operand. For + // ROTR on integer registers (i32/i64) this can be done in a single ror + // instruction. A fshl with a non-constant shift uses a neg + ror. + if (RetTy->isIntegerTy() && ICA.getArgs()[0] == ICA.getArgs()[1] && + (RetTy->getPrimitiveSizeInBits() == 32 || + RetTy->getPrimitiveSizeInBits() == 64)) { + InstructionCost NegCost = + (ICA.getID() == Intrinsic::fshl && !OpInfoZ.isConstant()) ? 
1 : 0; + return 1 + NegCost; + } + + // TODO: Add handling for fshl where third argument is not a constant. if (!OpInfoZ.isConstant()) break; @@ -6062,7 +6079,7 @@ AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, if (LT.second.isFixedLengthVector() && LT.second.getVectorNumElements() == Mask.size() && (Kind == TTI::SK_PermuteTwoSrc || Kind == TTI::SK_PermuteSingleSrc) && - (isZIPMask(Mask, LT.second.getVectorNumElements(), Unused) || + (isZIPMask(Mask, LT.second.getVectorNumElements(), Unused, Unused) || isUZPMask(Mask, LT.second.getVectorNumElements(), Unused) || isREVMask(Mask, LT.second.getScalarSizeInBits(), LT.second.getVectorNumElements(), 16) || diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h index 52fc28a98449b..fe3bb5e7981d2 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -334,6 +334,23 @@ class AArch64TTIImpl final : public BasicTTIImplBase { return isLegalMaskedLoadStore(DataType, Alignment); } + bool isElementTypeLegalForCompressStore(Type *Ty) const { + return Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isIntegerTy(32) || + Ty->isIntegerTy(64); + } + + bool isLegalMaskedCompressStore(Type *DataType, + Align Alignment) const override { + if (!ST->isSVEAvailable()) + return false; + + if (isa(DataType) && + DataType->getPrimitiveSizeInBits() < 128) + return false; + + return isElementTypeLegalForCompressStore(DataType->getScalarType()); + } + bool isLegalMaskedGatherScatter(Type *DataType) const { if (!ST->isSVEAvailable()) return false; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp index 55694efafeed1..7907a3c283624 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -1421,6 +1421,7 @@ bool 
AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, } else if (Info.CFIType) { MIB->setCFIType(MF, Info.CFIType->getZExtValue()); } + MIB->setDeactivationSymbol(MF, Info.DeactivationSymbol); MIB.add(Info.Callee); diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index 089b0b2feb231..1025b2502211a 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -21,6 +21,7 @@ #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/IR/DerivedTypes.h" @@ -820,8 +821,17 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) .legalFor( {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}}) .libcallFor({{s16, s128}, {s32, s128}, {s64, s128}}) - .clampNumElements(0, v4s16, v4s16) - .clampNumElements(0, v2s32, v2s32) + .moreElementsToNextPow2(1) + .customIf([](const LegalityQuery &Q) { + LLT DstTy = Q.Types[0]; + LLT SrcTy = Q.Types[1]; + return SrcTy.isFixedVector() && DstTy.isFixedVector() && + SrcTy.getScalarSizeInBits() == 64 && + DstTy.getScalarSizeInBits() == 16; + }) + // Clamp based on input + .clampNumElements(1, v4s32, v4s32) + .clampNumElements(1, v2s64, v2s64) .scalarize(0); getActionDefinitionsBuilder(G_FPEXT) @@ -1479,6 +1489,10 @@ bool AArch64LegalizerInfo::legalizeCustom( return legalizeICMP(MI, MRI, MIRBuilder); case TargetOpcode::G_BITCAST: return legalizeBitcast(MI, Helper); + case TargetOpcode::G_FPTRUNC: + // In order to lower f64 to f16 properly, we need to use f32 as an + // intermediary + return legalizeFptrunc(MI, MIRBuilder, MRI); } llvm_unreachable("expected switch to return"); @@ -2416,3 +2430,80 @@ bool 
AArch64LegalizerInfo::legalizePrefetch(MachineInstr &MI, MI.eraseFromParent(); return true; } + +bool AArch64LegalizerInfo::legalizeFptrunc(MachineInstr &MI, + MachineIRBuilder &MIRBuilder, + MachineRegisterInfo &MRI) const { + auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs(); + assert(SrcTy.isFixedVector() && isPowerOf2_32(SrcTy.getNumElements()) && + "Expected a power of 2 elements"); + + LLT s16 = LLT::scalar(16); + LLT s32 = LLT::scalar(32); + LLT s64 = LLT::scalar(64); + LLT v2s16 = LLT::fixed_vector(2, s16); + LLT v4s16 = LLT::fixed_vector(4, s16); + LLT v2s32 = LLT::fixed_vector(2, s32); + LLT v4s32 = LLT::fixed_vector(4, s32); + LLT v2s64 = LLT::fixed_vector(2, s64); + + SmallVector RegsToUnmergeTo; + SmallVector TruncOddDstRegs; + SmallVector RegsToMerge; + + unsigned ElemCount = SrcTy.getNumElements(); + + // Find the biggest size chunks we can work with + int StepSize = ElemCount % 4 ? 2 : 4; + + // If we have a power of 2 greater than 2, we need to first unmerge into + // enough pieces + if (ElemCount <= 2) + RegsToUnmergeTo.push_back(Src); + else { + for (unsigned i = 0; i < ElemCount / 2; ++i) + RegsToUnmergeTo.push_back(MRI.createGenericVirtualRegister(v2s64)); + + MIRBuilder.buildUnmerge(RegsToUnmergeTo, Src); + } + + // Create all of the round-to-odd instructions and store them + for (auto SrcReg : RegsToUnmergeTo) { + Register Mid = + MIRBuilder.buildInstr(AArch64::G_FPTRUNC_ODD, {v2s32}, {SrcReg}) + .getReg(0); + TruncOddDstRegs.push_back(Mid); + } + + // Truncate 4s32 to 4s16 if we can to reduce instruction count, otherwise + // truncate 2s32 to 2s16. 
+ unsigned Index = 0; + for (unsigned LoopIter = 0; LoopIter < ElemCount / StepSize; ++LoopIter) { + if (StepSize == 4) { + Register ConcatDst = + MIRBuilder + .buildMergeLikeInstr( + {v4s32}, {TruncOddDstRegs[Index++], TruncOddDstRegs[Index++]}) + .getReg(0); + + RegsToMerge.push_back( + MIRBuilder.buildFPTrunc(v4s16, ConcatDst).getReg(0)); + } else { + RegsToMerge.push_back( + MIRBuilder.buildFPTrunc(v2s16, TruncOddDstRegs[Index++]).getReg(0)); + } + } + + // If there is only one register, replace the destination + if (RegsToMerge.size() == 1) { + MRI.replaceRegWith(Dst, RegsToMerge.pop_back_val()); + MI.eraseFromParent(); + return true; + } + + // Merge the rest of the instructions & replace the register + Register Fin = MIRBuilder.buildMergeLikeInstr(DstTy, RegsToMerge).getReg(0); + MRI.replaceRegWith(Dst, Fin); + MI.eraseFromParent(); + return true; +} \ No newline at end of file diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h index bcb294326fa92..12b6a6fa395a8 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h @@ -67,6 +67,8 @@ class AArch64LegalizerInfo : public LegalizerInfo { bool legalizeDynStackAlloc(MachineInstr &MI, LegalizerHelper &Helper) const; bool legalizePrefetch(MachineInstr &MI, LegalizerHelper &Helper) const; bool legalizeBitcast(MachineInstr &MI, LegalizerHelper &Helper) const; + bool legalizeFptrunc(MachineInstr &MI, MachineIRBuilder &MIRBuilder, + MachineRegisterInfo &MRI) const; const AArch64Subtarget *ST; }; } // End llvm namespace. 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp index 23dcaea2ac1a4..4fba593b3d0fb 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp @@ -252,14 +252,15 @@ bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo) { assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR); unsigned WhichResult; + unsigned OperandOrder; ArrayRef ShuffleMask = MI.getOperand(3).getShuffleMask(); Register Dst = MI.getOperand(0).getReg(); unsigned NumElts = MRI.getType(Dst).getNumElements(); - if (!isZIPMask(ShuffleMask, NumElts, WhichResult)) + if (!isZIPMask(ShuffleMask, NumElts, WhichResult, OperandOrder)) return false; unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2; - Register V1 = MI.getOperand(1).getReg(); - Register V2 = MI.getOperand(2).getReg(); + Register V1 = MI.getOperand(OperandOrder == 0 ? 1 : 2).getReg(); + Register V2 = MI.getOperand(OperandOrder == 0 ? 
2 : 1).getReg(); MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2}); return true; } diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp index 64f96c57d2026..942e1bd5b4e0b 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp @@ -53,7 +53,7 @@ void AArch64WinCOFFStreamer::emitWindowsUnwindTables() { } void AArch64WinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index 8e35ba77d69aa..71ea9ef6fc050 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -143,14 +143,6 @@ class AMDGPUCodeGenPrepareImpl bool canBreakPHINode(const PHINode &I); - /// \returns True if binary operation \p I is a signed binary operation, false - /// otherwise. - bool isSigned(const BinaryOperator &I) const; - - /// \returns True if the condition of 'select' operation \p I comes from a - /// signed 'icmp' operation, false otherwise. - bool isSigned(const SelectInst &I) const; - /// Return true if \p T is a legal scalar floating point type. 
bool isLegalFloatingTy(const Type *T) const; @@ -304,16 +296,6 @@ bool AMDGPUCodeGenPrepareImpl::run() { return MadeChange; } -bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const { - return I.getOpcode() == Instruction::AShr || - I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem; -} - -bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const { - return isa(I.getOperand(0)) && - cast(I.getOperand(0))->isSigned(); -} - bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const { return Ty->isFloatTy() || Ty->isDoubleTy() || (Ty->isHalfTy() && ST.has16BitInsts()); diff --git a/llvm/lib/Target/AMDGPU/R600.td b/llvm/lib/Target/AMDGPU/R600.td index 9148edb92b084..bdfaac9f42ea7 100644 --- a/llvm/lib/Target/AMDGPU/R600.td +++ b/llvm/lib/Target/AMDGPU/R600.td @@ -8,15 +8,6 @@ include "llvm/Target/Target.td" -def R600InstrInfo : InstrInfo { - let guessInstructionProperties = 1; -} - -def R600 : Target { - let InstructionSet = R600InstrInfo; - let AllowRegisterRenaming = 1; -} - let Namespace = "R600" in { foreach Index = 0-15 in { @@ -27,6 +18,18 @@ include "R600RegisterInfo.td" } +defm : RemapAllTargetPseudoPointerOperands; + +def R600InstrInfo : InstrInfo { + let guessInstructionProperties = 1; +} + +def R600 : Target { + let InstructionSet = R600InstrInfo; + let AllowRegisterRenaming = 1; +} + + def NullALU : InstrItinClass; def ALU_NULL : FuncUnit; diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h index b7a92a0a1d634..0d206aba33543 100644 --- a/llvm/lib/Target/AMDGPU/SIDefines.h +++ b/llvm/lib/Target/AMDGPU/SIDefines.h @@ -523,6 +523,7 @@ enum Id { // HwRegCode, (6) [5:0] ID_HW_ID1 = 23, ID_HW_ID2 = 24, ID_POPS_PACKER = 25, + ID_SCHED_MODE = 26, ID_PERF_SNAPSHOT_DATA_gfx11 = 27, ID_IB_STS2 = 28, ID_SHADER_CYCLES = 29, diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index fa28a969439d9..da019b6e476df 100644 --- 
a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -1963,6 +1963,10 @@ MachineBasicBlock *SIInstrInfo::insertSimulatedTrap(MachineRegisterInfo &MRI, BuildMI(MBB, MI, DL, get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(TrapBB); MF->push_back(TrapBB); MBB.addSuccessor(TrapBB); + } else { + // Since we're adding HaltLoopBB and modifying the CFG, we must return a + // different block to signal the change. + ContBB = HaltLoopBB; } // Start with a `s_trap 2`, if we're in PRIV=1 and we need the workaround this diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index 3fe37e8217f35..c5f5b7d53cfb1 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -4751,3 +4751,14 @@ def V_ILLEGAL : Enc32, InstSI<(outs), (ins), "v_illegal"> { let hasSideEffects = 1; let SubtargetPredicate = isGFX10Plus; } + +defvar VGPR32_Ptr_Opcodes = [LOAD_STACK_GUARD]; +defvar VGPR64_Ptr_Opcodes = !listremove(PseudosWithPtrOps, VGPR32_Ptr_Opcodes); + +foreach inst = VGPR32_Ptr_Opcodes in { + def : RemapPointerOperands; +} + +foreach inst = VGPR64_Ptr_Opcodes in { + def : RemapPointerOperands; +} diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp index 6489e63d4f6b8..ce782b025464e 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp @@ -211,6 +211,7 @@ static constexpr CustomOperand Operands[] = { {{"HW_REG_HW_ID2"}, ID_HW_ID2, isGFX10Plus}, {{"HW_REG_SQ_PERF_SNAPSHOT_PC_HI"}, ID_SQ_PERF_SNAPSHOT_PC_HI, isGFX940}, {{"HW_REG_POPS_PACKER"}, ID_POPS_PACKER, isGFX10}, + {{"HW_REG_WAVE_SCHED_MODE"}, ID_SCHED_MODE, isGFX12Plus}, {{"HW_REG_PERF_SNAPSHOT_DATA"}, ID_PERF_SNAPSHOT_DATA_gfx11, isGFX11}, {{"HW_REG_IB_STS2"}, ID_IB_STS2, isGFX1250}, {{"HW_REG_SHADER_CYCLES"}, ID_SHADER_CYCLES, isGFX10_3_GFX11}, diff --git a/llvm/lib/Target/ARC/ARC.td 
b/llvm/lib/Target/ARC/ARC.td index 142ce7f747919..71b3bb61639f8 100644 --- a/llvm/lib/Target/ARC/ARC.td +++ b/llvm/lib/Target/ARC/ARC.td @@ -24,6 +24,8 @@ include "ARCRegisterInfo.td" include "ARCInstrInfo.td" include "ARCCallingConv.td" +defm : RemapAllTargetPseudoPointerOperands; + def ARCInstrInfo : InstrInfo; class Proc Features> diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td index 570aae9b3c7a7..1f71d810983db 100644 --- a/llvm/lib/Target/ARM/ARM.td +++ b/llvm/lib/Target/ARM/ARM.td @@ -38,6 +38,14 @@ include "ARMSchedule.td" //===----------------------------------------------------------------------===// include "ARMInstrInfo.td" + +def Thumb1OnlyMode : HwMode<[IsThumb1Only]>; +def arm_ptr_rc : RegClassByHwMode< + [DefaultMode, Thumb1OnlyMode], + [GPR, tGPR]>; + +defm : RemapAllTargetPseudoPointerOperands; + def ARMInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 2d2e62c80c702..c1dd2e5066b6d 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -51,8 +51,8 @@ using namespace llvm; ARMAsmPrinter::ARMAsmPrinter(TargetMachine &TM, std::unique_ptr Streamer) - : AsmPrinter(TM, std::move(Streamer), ID), Subtarget(nullptr), AFI(nullptr), - MCP(nullptr), InConstantPool(false), OptimizationGoals(-1) {} + : AsmPrinter(TM, std::move(Streamer), ID), AFI(nullptr), MCP(nullptr), + InConstantPool(false), OptimizationGoals(-1) {} const ARMBaseTargetMachine &ARMAsmPrinter::getTM() const { return static_cast(TM); @@ -116,7 +116,6 @@ void ARMAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) { bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { AFI = MF.getInfo(); MCP = MF.getConstantPool(); - Subtarget = &MF.getSubtarget(); SetupMachineFunction(MF); const Function &F = MF.getFunction(); @@ -154,7 +153,7 @@ bool 
ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { else if (OptimizationGoals != (int)OptimizationGoal) // conflicting goals OptimizationGoals = 0; - if (Subtarget->isTargetCOFF()) { + if (TM.getTargetTriple().isOSBinFormatCOFF()) { bool Local = F.hasLocalLinkage(); COFF::SymbolStorageClass Scl = Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL; @@ -260,8 +259,8 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum, break; } case MachineOperand::MO_ConstantPoolIndex: - if (Subtarget->genExecuteOnly()) - llvm_unreachable("execute-only should not generate constant pools"); + assert(!MF->getSubtarget().genExecuteOnly() && + "execute-only should not generate constant pools"); GetCPISymbol(MO.getIndex())->print(O, MAI); break; } @@ -1048,7 +1047,8 @@ void ARMAsmPrinter::emitJumpTableAddrs(const MachineInstr *MI) { // .word (LBB1 - LJTI_0_0) const MCExpr *Expr = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext); - if (isPositionIndependent() || Subtarget->isROPI()) + const ARMSubtarget &STI = MF->getSubtarget(); + if (isPositionIndependent() || STI.isROPI()) Expr = MCBinaryExpr::createSub(Expr, MCSymbolRefExpr::create(JTISymbol, OutContext), OutContext); @@ -1097,7 +1097,8 @@ void ARMAsmPrinter::emitJumpTableTBInst(const MachineInstr *MI, const MachineOperand &MO1 = MI->getOperand(1); unsigned JTI = MO1.getIndex(); - if (Subtarget->isThumb1Only()) + const ARMSubtarget &STI = MF->getSubtarget(); + if (STI.isThumb1Only()) emitAlignment(Align(4)); MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI); @@ -1905,6 +1906,7 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { ARM_MC::verifyInstructionPredicates(MI->getOpcode(), getSubtargetInfo().getFeatureBits()); + const ARMSubtarget &STI = MF->getSubtarget(); const DataLayout &DL = getDataLayout(); MCTargetStreamer &TS = *OutStreamer->getTargetStreamer(); ARMTargetStreamer &ATS = static_cast(TS); @@ -1916,8 +1918,8 @@ void ARMAsmPrinter::emitInstruction(const 
MachineInstr *MI) { } // Emit unwinding stuff for frame-related instructions - if (Subtarget->isTargetEHABICompatible() && - MI->getFlag(MachineInstr::FrameSetup)) + if (TM.getTargetTriple().isTargetEHABICompatible() && + MI->getFlag(MachineInstr::FrameSetup)) EmitUnwindingInstruction(MI); // Do any auto-generated pseudo lowerings. @@ -1983,14 +1985,13 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { // Add 's' bit operand (always reg0 for this) .addReg(0)); - assert(Subtarget->hasV4TOps()); - EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX) - .addReg(MI->getOperand(0).getReg())); + assert(STI.hasV4TOps() && "Expected V4TOps for BX call"); + EmitToStreamer(*OutStreamer, + MCInstBuilder(ARM::BX).addReg(MI->getOperand(0).getReg())); return; } case ARM::tBX_CALL: { - if (Subtarget->hasV5TOps()) - llvm_unreachable("Expected BLX to be selected for v5t+"); + assert(!STI.hasV5TOps() && "Expected BLX to be selected for v5t+"); // On ARM v4t, when doing a call from thumb mode, we need to ensure // that the saved lr has its LSB set correctly (the arch doesn't @@ -2279,8 +2280,8 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { return; } case ARM::CONSTPOOL_ENTRY: { - if (Subtarget->genExecuteOnly()) - llvm_unreachable("execute-only should not generate constant pools"); + assert(!STI.genExecuteOnly() && + "execute-only should not generate constant pools"); /// CONSTPOOL_ENTRY - This instruction represents a floating constant pool /// in the function. The first operand is the ID# for this instruction, the @@ -2486,7 +2487,7 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { case ARM::TRAP: { // Non-Darwin binutils don't yet support the "trap" mnemonic. // FIXME: Remove this special case when they do. 
- if (!Subtarget->isTargetMachO()) { + if (!TM.getTargetTriple().isOSBinFormatMachO()) { uint32_t Val = 0xe7ffdefeUL; OutStreamer->AddComment("trap"); ATS.emitInst(Val); @@ -2497,7 +2498,7 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { case ARM::tTRAP: { // Non-Darwin binutils don't yet support the "trap" mnemonic. // FIXME: Remove this special case when they do. - if (!Subtarget->isTargetMachO()) { + if (!TM.getTargetTriple().isOSBinFormatMachO()) { uint16_t Val = 0xdefe; OutStreamer->AddComment("trap"); ATS.emitInst(Val, 'n'); @@ -2657,9 +2658,6 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { .addImm(ARMCC::AL) .addReg(0)); - const MachineFunction &MF = *MI->getParent()->getParent(); - const ARMSubtarget &STI = MF.getSubtarget(); - if (STI.isTargetDarwin() || STI.isTargetWindows()) { // These platforms always use the same frame register EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::LDRi12) @@ -2688,7 +2686,7 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { .addReg(0)); } - assert(Subtarget->hasV4TOps()); + assert(STI.hasV4TOps()); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX) .addReg(ScratchReg) // Predicate. 
@@ -2705,9 +2703,6 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) { Register SrcReg = MI->getOperand(0).getReg(); Register ScratchReg = MI->getOperand(1).getReg(); - const MachineFunction &MF = *MI->getParent()->getParent(); - const ARMSubtarget &STI = MF.getSubtarget(); - EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLDRi) .addReg(ScratchReg) .addReg(SrcReg) diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.h b/llvm/lib/Target/ARM/ARMAsmPrinter.h index 9e92b5a36a672..b9cd3c2613bc8 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.h +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.h @@ -9,13 +9,13 @@ #ifndef LLVM_LIB_TARGET_ARM_ARMASMPRINTER_H #define LLVM_LIB_TARGET_ARM_ARMASMPRINTER_H -#include "ARMSubtarget.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/Target/TargetMachine.h" namespace llvm { class ARMFunctionInfo; +class ARMBaseTargetMachine; class MCOperand; class MachineConstantPool; class MachineOperand; @@ -33,10 +33,6 @@ class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter { static char ID; private: - /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can - /// make the right decision when printing asm code for different targets. - const ARMSubtarget *Subtarget; - /// AFI - Keep a pointer to ARMFunctionInfo for the current /// MachineFunction. 
ARMFunctionInfo *AFI; diff --git a/llvm/lib/Target/ARM/ARMInstrCDE.td b/llvm/lib/Target/ARM/ARMInstrCDE.td index f4326de5ed667..5d4e3acf5b581 100644 --- a/llvm/lib/Target/ARM/ARMInstrCDE.td +++ b/llvm/lib/Target/ARM/ARMInstrCDE.td @@ -115,6 +115,7 @@ class CDE_CX1_Instr !con(params.Iops1, (ins imm_13b:$imm), params.PredOp), !strconcat(iname, params.PAsm, "\t$coproc, $Rd, $imm"), params.Cstr> { + bits<0> p; bits<13> imm; bits<4> Rd; @@ -131,6 +132,7 @@ class CDE_CX2_Instr !con(params.Iops2, (ins imm_9b:$imm), params.PredOp), !strconcat(iname, params.PAsm, "\t$coproc, $Rd, $Rn, $imm"), params.Cstr> { + bits<0> p; bits<9> imm; bits<4> Rd; bits<4> Rn; @@ -149,6 +151,7 @@ class CDE_CX3_Instr !con(params.Iops3, (ins imm_6b:$imm), params.PredOp), !strconcat(iname, params.PAsm, "\t$coproc, $Rd, $Rn, $Rm, $imm"), params.Cstr> { + bits<0> p; bits<6> imm; bits<4> Rd; bits<4> Rn; diff --git a/llvm/lib/Target/ARM/ARMInstrFormats.td b/llvm/lib/Target/ARM/ARMInstrFormats.td index 1ad2485dce17f..1cd1a9a0f7331 100644 --- a/llvm/lib/Target/ARM/ARMInstrFormats.td +++ b/llvm/lib/Target/ARM/ARMInstrFormats.td @@ -1220,6 +1220,7 @@ class Thumb1sI pattern> : InstThumb { bits<0> s; + bits<0> p; let OutOperandList = !con(oops, (outs s_cc_out:$s)); let InOperandList = !con(iops, (ins pred:$p)); let AsmString = !strconcat(opc, "${s}${p}", asm); @@ -1244,6 +1245,7 @@ class Thumb1pI pattern> : InstThumb { + bits<0> p; let OutOperandList = oops; let InOperandList = !con(iops, (ins pred:$p)); let AsmString = !strconcat(opc, "${p}", asm); @@ -1343,6 +1345,7 @@ class Thumb2I pattern> : InstARM { + bits<0> p; let OutOperandList = oops; let InOperandList = !con(iops, (ins pred:$p)); let AsmString = !strconcat(opc, "${p}", asm); @@ -1361,6 +1364,7 @@ class Thumb2sI pattern> : InstARM { + bits<0> p; bits<1> s; // condition-code set flag ('1' if the insn should set the flags) let Inst{20} = s; @@ -2221,6 +2225,7 @@ class NeonI pattern> : InstARM { + bits<0> p; let OutOperandList = oops; let 
InOperandList = !con(iops, (ins pred:$p)); let AsmString = !strconcat(opc, "${p}", ".", dt, "\t", asm); @@ -2234,6 +2239,7 @@ class NeonXI pattern> : InstARM { + bits<0> p; let OutOperandList = oops; let InOperandList = !con(iops, (ins pred:$p)); let AsmString = !strconcat(opc, "${p}", "\t", asm); diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td index 55b0d9e1c01fc..0ee98e68de68d 100644 --- a/llvm/lib/Target/ARM/ARMInstrThumb.td +++ b/llvm/lib/Target/ARM/ARMInstrThumb.td @@ -484,6 +484,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def tBX : TI<(outs), (ins GPR:$Rm, pred:$p), IIC_Br, "bx${p}\t$Rm", []>, T1Special<{1,1,0,?}>, Sched<[WriteBr]> { // A6.2.3 & A8.6.25 + bits<0> p; bits<4> Rm; let Inst{6-3} = Rm; let Inst{2-0} = 0b000; @@ -492,6 +493,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def tBXNS : TI<(outs), (ins GPR:$Rm, pred:$p), IIC_Br, "bxns${p}\t$Rm", []>, Requires<[IsThumb, Has8MSecExt]>, T1Special<{1,1,0,?}>, Sched<[WriteBr]> { + bits<0> p; bits<4> Rm; let Inst{6-3} = Rm; let Inst{2-0} = 0b100; @@ -524,6 +526,7 @@ let isCall = 1, "bl${p}\t$func", [(ARMcall tglobaladdr:$func)]>, Requires<[IsThumb]>, Sched<[WriteBrL]> { + bits<0> p; bits<24> func; let Inst{26} = func{23}; let Inst{25-16} = func{20-11}; @@ -537,6 +540,7 @@ let isCall = 1, (outs), (ins pred:$p, thumb_blx_target:$func), IIC_Br, "blx${p}\t$func", []>, Requires<[IsThumb, HasV5T, IsNotMClass]>, Sched<[WriteBrL]> { + bits<0> p; bits<24> func; let Inst{26} = func{23}; let Inst{25-16} = func{20-11}; @@ -551,6 +555,7 @@ let isCall = 1, "blx${p}\t$func", []>, Requires<[IsThumb, HasV5T]>, T1Special<{1,1,1,?}>, Sched<[WriteBrL]> { // A6.2.3 & A8.6.24; + bits<0> p; bits<4> func; let Inst{6-3} = func; let Inst{2-0} = 0b000; @@ -566,6 +571,7 @@ let isCall = 1, "blxns${p}\t$func", []>, Requires<[IsThumb, Has8MSecExt]>, T1Special<{1,1,1,?}>, Sched<[WriteBrL]> { + bits<0> p; bits<4> func; let 
Inst{6-3} = func; let Inst{2-0} = 0b100; @@ -825,6 +831,7 @@ let hasSideEffects = 0 in { let mayLoad = 1, hasExtraDefRegAllocReq = 1, variadicOpsAreDefs = 1 in def tLDMIA : T1I<(outs), (ins tGPR:$Rn, pred:$p, reglist:$regs, variable_ops), IIC_iLoad_m, "ldm${p}\t$Rn, $regs", []>, T1Encoding<{1,1,0,0,1,?}> { + bits<0> p; bits<3> Rn; bits<8> regs; let Inst{10-8} = Rn; @@ -855,6 +862,7 @@ def tSTMIA_UPD : Thumb1I<(outs tGPR:$wb), AddrModeNone, 2, IIC_iStore_mu, "stm${p}\t$Rn!, $regs", "$Rn = $wb", []>, T1Encoding<{1,1,0,0,0,?}> { + bits<0> p; bits<3> Rn; bits<8> regs; let Inst{10-8} = Rn; @@ -873,6 +881,7 @@ def tPOP : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops), IIC_iPop, "pop${p}\t$regs", []>, T1Misc<{1,1,0,?,?,?,?}>, Sched<[WriteLd]> { + bits<0> p; bits<16> regs; let Inst{8} = regs{15}; let Inst{7-0} = regs{7-0}; @@ -883,6 +892,7 @@ def tPUSH : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops), IIC_iStore_m, "push${p}\t$regs", []>, T1Misc<{0,1,0,?,?,?,?}>, Sched<[WriteST]> { + bits<0> p; bits<16> regs; let Inst{8} = regs{14}; let Inst{7-0} = regs{7-0}; diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td index 66a2297bde0be..596196c4d0425 100644 --- a/llvm/lib/Target/ARM/ARMInstrThumb2.td +++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td @@ -2059,6 +2059,7 @@ multiclass thumb2_ld_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2074,6 +2075,7 @@ multiclass thumb2_ld_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2089,6 +2091,7 @@ multiclass thumb2_ld_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2104,6 +2107,7 @@ multiclass thumb2_ld_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2128,6 +2132,7 @@ multiclass thumb2_st_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2146,6 +2151,7 @@ multiclass thumb2_st_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2164,6 +2170,7 @@ multiclass thumb2_st_mult { + bits<0> p; bits<4> Rn; bits<16> regs; @@ -2182,6 +2189,7 @@ multiclass thumb2_st_mult { + bits<0> p; 
bits<4> Rn; bits<16> regs; @@ -4030,9 +4038,11 @@ def t2TBH : T2I<(outs), (ins (addrmode_tbh $Rn, $Rm):$addr), IIC_Br, // FIXME: should be able to write a pattern for ARMBrcond, but can't use // a two-value operand where a dag node expects ", "two operands. :( let isBranch = 1, isTerminator = 1 in -def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br, - "b", ".w\t$target", - [/*(ARMbrcond bb:$target, imm:$cc)*/]>, Sched<[WriteBr]> { +def t2Bcc : Thumb2XI<(outs), (ins brtarget:$target, pred:$p), + AddrModeNone, 4, IIC_Br, + "b${p}.w\t$target", "", + [/*(ARMbrcond bb:$target, imm:$cc)*/]>, + Sched<[WriteBr]> { let Inst{31-27} = 0b11110; let Inst{15-14} = 0b10; let Inst{12} = 0; @@ -5481,6 +5491,7 @@ class V8_1MI { + bits<0> p; bits<16> regs; let Inst{31-16} = 0b1110100010011111; @@ -5509,6 +5520,7 @@ def t2BF_LabelPseudo def t2BFi : t2BF<(ins bflabel_u4:$b_label, bflabel_s16:$label, pred:$p), !strconcat("bf", "${p}"), "$b_label, $label"> { + bits<0> p; bits<4> b_label; bits<16> label; @@ -5540,6 +5552,7 @@ def t2BFic : t2BF<(ins bflabel_u4:$b_label, bflabel_s12:$label, def t2BFr : t2BF<(ins bflabel_u4:$b_label, rGPR:$Rn, pred:$p), !strconcat("bfx", "${p}"), "$b_label, $Rn"> { + bits<0> p; bits<4> b_label; bits<4> Rn; @@ -5551,6 +5564,7 @@ def t2BFr : t2BF<(ins bflabel_u4:$b_label, rGPR:$Rn, pred:$p), def t2BFLi : t2BF<(ins bflabel_u4:$b_label, bflabel_s18:$label, pred:$p), !strconcat("bfl", "${p}"), "$b_label, $label"> { + bits<0> p; bits<4> b_label; bits<18> label; @@ -5563,6 +5577,7 @@ def t2BFLi : t2BF<(ins bflabel_u4:$b_label, bflabel_s18:$label, pred:$p), def t2BFLr : t2BF<(ins bflabel_u4:$b_label, rGPR:$Rn, pred:$p), !strconcat("bflx", "${p}"), "$b_label, $Rn"> { + bits<0> p; bits<4> b_label; bits<4> Rn; @@ -5826,6 +5841,7 @@ let Predicates = [IsThumb2, HasV8_1MMainline, HasPACBTI] in { def t2PACG : V8_1MI<(outs rGPR:$Rd), (ins pred:$p, GPRnopc:$Rn, GPRnopc:$Rm), AddrModeNone, NoItinerary, "pacg${p}", "$Rd, $Rn, $Rm", "", []> { + bits<0> p; bits<4> Rd; 
bits<4> Rn; bits<4> Rm; @@ -5841,6 +5857,7 @@ let hasSideEffects = 1 in { class PACBTIAut : V8_1MI<(outs), iops, AddrModeNone, NoItinerary, asm, "$Ra, $Rn, $Rm", "", []> { + bits<0> p; bits<4> Ra; bits<4> Rn; bits<4> Rm; diff --git a/llvm/lib/Target/ARM/ARMMCInstLower.cpp b/llvm/lib/Target/ARM/ARMMCInstLower.cpp index f5d6597f214dd..c040904a82b71 100644 --- a/llvm/lib/Target/ARM/ARMMCInstLower.cpp +++ b/llvm/lib/Target/ARM/ARMMCInstLower.cpp @@ -112,8 +112,8 @@ bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO, MCOp = GetSymbolRef(MO, GetJTISymbol(MO.getIndex())); break; case MachineOperand::MO_ConstantPoolIndex: - if (Subtarget->genExecuteOnly()) - llvm_unreachable("execute-only should not generate constant pools"); + assert(!MF->getSubtarget().genExecuteOnly() && + "execute-only should not generate constant pools"); MCOp = GetSymbolRef(MO, GetCPISymbol(MO.getIndex())); break; case MachineOperand::MO_BlockAddress: diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp index cad0cb6a441a0..e6af32df5b76f 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.cpp +++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp @@ -379,10 +379,15 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) { } bool ARMSubtarget::isROPI() const { + // FIXME: This should ideally come from a function attribute, to work + // correctly with LTO. return TM.getRelocationModel() == Reloc::ROPI || TM.getRelocationModel() == Reloc::ROPI_RWPI; } + bool ARMSubtarget::isRWPI() const { + // FIXME: This should ideally come from a function attribute, to work + // correctly with LTO. 
return TM.getRelocationModel() == Reloc::RWPI || TM.getRelocationModel() == Reloc::ROPI_RWPI; } diff --git a/llvm/lib/Target/ARM/CMakeLists.txt b/llvm/lib/Target/ARM/CMakeLists.txt index eb3ad01a54fb2..d99368e1d3b2b 100644 --- a/llvm/lib/Target/ARM/CMakeLists.txt +++ b/llvm/lib/Target/ARM/CMakeLists.txt @@ -6,8 +6,7 @@ tablegen(LLVM ARMGenAsmMatcher.inc -gen-asm-matcher) tablegen(LLVM ARMGenAsmWriter.inc -gen-asm-writer) tablegen(LLVM ARMGenCallingConv.inc -gen-callingconv) tablegen(LLVM ARMGenDAGISel.inc -gen-dag-isel) -tablegen(LLVM ARMGenDisassemblerTables.inc -gen-disassembler - -ignore-non-decodable-operands) +tablegen(LLVM ARMGenDisassemblerTables.inc -gen-disassembler) tablegen(LLVM ARMGenFastISel.inc -gen-fast-isel) tablegen(LLVM ARMGenGlobalISel.inc -gen-global-isel) tablegen(LLVM ARMGenInstrInfo.inc -gen-instr-info) diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index b119146576569..44f50dd03e54f 100644 --- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -149,7 +149,7 @@ class ARMDisassembler : public MCDisassembler { raw_ostream &CStream) const; bool isVectorPredicable(const MCInst &MI) const; - DecodeStatus AddThumbPredicate(MCInst&) const; + DecodeStatus checkThumbPredicate(MCInst &) const; void UpdateThumbPredicate(DecodeStatus &S, MCInst &MI) const; llvm::endianness InstructionEndianness; @@ -618,6 +618,23 @@ static DecodeStatus DecodePredicateOperand(MCInst &Inst, unsigned Val, return S; } +// This overload is used to decode a `pred` operand that is not encoded into +// instruction. This is the case for almost all predicable Thumb instructions +// (exceptions are tBcc and t2Bcc). Some predicable Thumb instructions have ARM +// equivalents where they are not predicable (always executed). This function +// is used to decode `pred` operand of these ARM instructions, too. 
+static DecodeStatus DecodePredicateOperand(MCInst &Inst, + const MCDisassembler *Decoder) { + const auto *D = static_cast(Decoder); + unsigned CC = ARMCC::AL; + if (D->getSubtargetInfo().hasFeature(ARM::ModeThumb)) + CC = D->ITBlock.getITCC(); + MCRegister CondReg = CC == ARMCC::AL ? ARM::NoRegister : ARM::CPSR; + Inst.addOperand(MCOperand::createImm(CC)); + Inst.addOperand(MCOperand::createReg(CondReg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val, uint64_t Address, const MCDisassembler *Decoder) { @@ -1050,6 +1067,40 @@ static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn, if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder))) return MCDisassembler::Fail; break; + case ARM::t2LDC2L_OFFSET: + case ARM::t2LDC2L_OPTION: + case ARM::t2LDC2L_POST: + case ARM::t2LDC2L_PRE: + case ARM::t2LDC2_OFFSET: + case ARM::t2LDC2_OPTION: + case ARM::t2LDC2_POST: + case ARM::t2LDC2_PRE: + case ARM::t2LDCL_OFFSET: + case ARM::t2LDCL_OPTION: + case ARM::t2LDCL_POST: + case ARM::t2LDCL_PRE: + case ARM::t2LDC_OFFSET: + case ARM::t2LDC_OPTION: + case ARM::t2LDC_POST: + case ARM::t2LDC_PRE: + case ARM::t2STC2L_OFFSET: + case ARM::t2STC2L_OPTION: + case ARM::t2STC2L_POST: + case ARM::t2STC2L_PRE: + case ARM::t2STC2_OFFSET: + case ARM::t2STC2_OPTION: + case ARM::t2STC2_POST: + case ARM::t2STC2_PRE: + case ARM::t2STCL_OFFSET: + case ARM::t2STCL_OPTION: + case ARM::t2STCL_POST: + case ARM::t2STCL_PRE: + case ARM::t2STC_OFFSET: + case ARM::t2STC_OPTION: + case ARM::t2STC_POST: + case ARM::t2STC_PRE: + DecodePredicateOperand(Inst, Decoder); + break; default: break; } @@ -1217,6 +1268,8 @@ static DecodeStatus DecodeTSBInstruction(MCInst &Inst, unsigned Insn, // the only available operand), but LLVM expects the instruction to have one // operand, so we need to add the csync when decoding. 
Inst.addOperand(MCOperand::createImm(ARM_TSB::CSYNC)); + if (Inst.getOpcode() == ARM::t2TSB) + DecodePredicateOperand(Inst, Decoder); return MCDisassembler::Success; } @@ -1650,6 +1703,7 @@ static DecodeStatus DecodeT2CPSInstruction(MCInst &Inst, unsigned Insn, if(imm > 4) return MCDisassembler::Fail; Inst.setOpcode(ARM::t2HINT); Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); } return S; @@ -1675,6 +1729,7 @@ DecodeT2HintSpaceInstruction(MCInst &Inst, unsigned Insn, uint64_t Address, Inst.setOpcode(Opcode); if (Opcode == ARM::t2HINT) { Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); } return MCDisassembler::Success; @@ -1702,6 +1757,7 @@ static DecodeStatus DecodeT2MOVTWInstruction(MCInst &Inst, unsigned Insn, if (!tryAddingSymbolicOperand(Address, imm, false, 4, Inst, Decoder)) Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -1906,6 +1962,7 @@ static DecodeStatus DecodeT2BInstruction(MCInst &Inst, unsigned Insn, true, 4, Inst, Decoder)) Inst.addOperand(MCOperand::createImm(imm32)); + DecodePredicateOperand(Inst, Decoder); return Status; } @@ -2231,6 +2288,7 @@ static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Insn, break; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2502,6 +2560,7 @@ static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Insn, break; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2605,6 +2664,7 @@ static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Insn, !Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2654,6 +2714,7 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2690,6 +2751,7 @@ static DecodeStatus DecodeVLD3DupInstruction(MCInst &Inst, 
unsigned Insn, return MCDisassembler::Fail; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2743,6 +2805,7 @@ static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2789,6 +2852,7 @@ static DecodeStatus DecodeVMOVModImmInstruction(MCInst &Inst, unsigned Insn, break; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2861,6 +2925,7 @@ static DecodeStatus DecodeVSHLMaxInstruction(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(8 << size)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2926,6 +2991,7 @@ static DecodeStatus DecodeTBLInstruction(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeDPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -2951,6 +3017,7 @@ static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn, } Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3113,6 +3180,7 @@ static DecodeStatus DecodeT2LoadLabel(MCInst &Inst, unsigned Insn, } Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3197,6 +3265,7 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3341,6 +3410,7 @@ static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3449,6 +3519,7 @@ static DecodeStatus DecodeT2LoadImm12(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeImm12(Inst, imm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); 
return S; } @@ -3488,6 +3559,7 @@ static DecodeStatus DecodeT2LoadT(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3678,6 +3750,7 @@ static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeImm8(Inst, addr, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3690,6 +3763,7 @@ static DecodeStatus DecodeThumbAddSPImm(MCInst &Inst, uint16_t Insn, Inst.addOperand(MCOperand::createReg(ARM::SP)); Inst.addOperand(MCOperand::createImm(imm)); + DecodePredicateOperand(Inst, Decoder); return MCDisassembler::Success; } @@ -3716,6 +3790,7 @@ static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn, return MCDisassembler::Fail; } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -3840,6 +3915,7 @@ static DecodeStatus DecodeThumbTableBranch(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; if (!Check(S, DecoderGPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4305,6 +4381,7 @@ static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4370,6 +4447,7 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4437,6 +4515,7 @@ static DecodeStatus DecodeVLD2LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4500,6 +4579,7 @@ static DecodeStatus DecodeVST2LN(MCInst &Inst, 
unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4570,6 +4650,7 @@ static DecodeStatus DecodeVLD3LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4633,6 +4714,7 @@ static DecodeStatus DecodeVST3LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4714,6 +4796,7 @@ static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4786,6 +4869,7 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(index)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4904,6 +4988,7 @@ static DecodeStatus DecodeT2LDRDPreInstruction(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeImm8s4(Inst, addr, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4939,6 +5024,7 @@ static DecodeStatus DecodeT2STRDPreInstruction(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeT2AddrModeImm8s4(Inst, addr, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -4965,6 +5051,7 @@ static DecodeStatus DecodeT2Adr(MCInst &Inst, uint32_t Insn, uint64_t Address, Val = -Val; } Inst.addOperand(MCOperand::createImm(Val)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -5062,6 +5149,7 @@ static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(64 - imm)); + 
DecodePredicateOperand(Inst, Decoder); return S; } @@ -5121,6 +5209,7 @@ static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn, uint64_t Address, return MCDisassembler::Fail; Inst.addOperand(MCOperand::createImm(64 - imm)); + DecodePredicateOperand(Inst, Decoder); return S; } @@ -5326,8 +5415,10 @@ static DecodeStatus DecodeLOLoop(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { DecodeStatus S = MCDisassembler::Success; - if (Inst.getOpcode() == ARM::MVE_LCTP) + if (Inst.getOpcode() == ARM::MVE_LCTP) { + DecodePredicateOperand(Inst, Decoder); return S; + } unsigned Imm = fieldFromInstruction(Insn, 11, 1) | fieldFromInstruction(Insn, 1, 10) << 1; @@ -5372,6 +5463,7 @@ static DecodeStatus DecodeLOLoop(MCInst &Inst, unsigned Insn, uint64_t Address, Check(S, MCDisassembler::SoftFail); // an SBZ bit is wrong: soft fail Inst.setOpcode(ARM::MVE_LCTP); + DecodePredicateOperand(Inst, Decoder); } else { Inst.addOperand(MCOperand::createReg(ARM::LR)); if (!Check(S, DecoderGPRRegisterClass(Inst, @@ -5762,6 +5854,7 @@ static DecodeStatus DecodeMVEVMOVQtoDReg(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeMVEPairVectorIndexOperand<0>(Inst, index, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -5788,6 +5881,7 @@ static DecodeStatus DecodeMVEVMOVDRegtoQ(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeMVEPairVectorIndexOperand<0>(Inst, index, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); return S; } @@ -5833,6 +5927,8 @@ DecodeMVEOverlappingLongShift(MCInst &Inst, unsigned Insn, uint64_t Address, if (!Check(S, DecoderGPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); + if (fieldFromInstruction (Insn, 6, 3) != 4) return MCDisassembler::SoftFail; @@ -5868,6 +5964,7 @@ DecodeMVEOverlappingLongShift(MCInst &Inst, unsigned Insn, uint64_t Address, 
Inst.addOperand(MCOperand::createImm(Saturate)); } + DecodePredicateOperand(Inst, Decoder); return S; } @@ -5971,10 +6068,12 @@ static DecodeStatus DecodeT2AddSubSPImm(MCInst &Inst, unsigned Insn, if (TypeT3) { Inst.setOpcode(sign1 ? ARM::t2SUBspImm12 : ARM::t2ADDspImm12); Inst.addOperand(MCOperand::createImm(Imm12)); // zext imm12 + DecodePredicateOperand(Inst, Decoder); } else { Inst.setOpcode(sign1 ? ARM::t2SUBspImm : ARM::t2ADDspImm); if (!Check(DS, DecodeT2SOImm(Inst, Imm12, Address, Decoder))) // imm12 return MCDisassembler::Fail; + DecodePredicateOperand(Inst, Decoder); if (!Check(DS, DecodeCCOutOperand(Inst, S, Address, Decoder))) // cc_out return MCDisassembler::Fail; } @@ -5994,7 +6093,7 @@ static DecodeStatus DecodeLazyLoadStoreMul(MCInst &Inst, unsigned Insn, if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) return MCDisassembler::Fail; // An optional predicate, '$p' in the assembly. - DecodePredicateOperand(Inst, ARMCC::AL, Address, Decoder); + DecodePredicateOperand(Inst, Decoder); // An immediate that represents a floating point registers list. '$regs' in // the assembly. Inst.addOperand(MCOperand::createImm(0)); // Arbitrary value, has no effect. 
@@ -6115,28 +6214,17 @@ DecodeStatus ARMDisassembler::getARMInstruction(MCInst &MI, uint64_t &Size, return checkDecodedInstruction(MI, Size, Address, CS, Insn, Result); } - struct DecodeTable { - const uint8_t *P; - bool DecodePred; - }; - - const DecodeTable Tables[] = { - {DecoderTableVFP32, false}, {DecoderTableVFPV832, false}, - {DecoderTableNEONData32, true}, {DecoderTableNEONLoadStore32, true}, - {DecoderTableNEONDup32, false}, {DecoderTablev8NEON32, false}, - {DecoderTablev8Crypto32, false}, + const uint8_t *Tables[] = { + DecoderTableVFP32, DecoderTableVFPV832, + DecoderTableNEONData32, DecoderTableNEONLoadStore32, + DecoderTableNEONDup32, DecoderTablev8NEON32, + DecoderTablev8Crypto32, }; - for (auto Table : Tables) { - Result = decodeInstruction(Table.P, MI, Insn, Address, this, STI); + for (const uint8_t *Table : Tables) { + Result = decodeInstruction(Table, MI, Insn, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - // Add a fake predicate operand, because we share these instruction - // definitions with Thumb2 where these instructions are predicable. - if (Table.DecodePred && MCII->get(MI.getOpcode()).isPredicable()) { - MI.addOperand(MCOperand::createImm(ARMCC::AL)); - MI.addOperand(MCOperand::createReg(ARM::NoRegister)); - } return Result; } } @@ -6161,18 +6249,16 @@ bool ARMDisassembler::isVectorPredicable(const MCInst &MI) const { return false; } -// Most Thumb instructions don't have explicit predicates in the -// encoding, but rather get their predicates from IT context. We need -// to fix up the predicate operands using this context information as a -// post-pass. +// Most Thumb instructions don't have explicit predicates in the encoding, +// but rather get their predicates from IT context. Here, we check that the +// decoded instruction is allowed to have the decoded predicate and advance +// IT/VPT block states. 
MCDisassembler::DecodeStatus -ARMDisassembler::AddThumbPredicate(MCInst &MI) const { +ARMDisassembler::checkThumbPredicate(MCInst &MI) const { MCDisassembler::DecodeStatus S = Success; const FeatureBitset &FeatureBits = getSubtargetInfo().getFeatureBits(); - // A few instructions actually have predicates encoded in them. Don't - // try to overwrite it if we're seeing one of those. switch (MI.getOpcode()) { case ARM::tBcc: case ARM::t2Bcc: @@ -6218,34 +6304,10 @@ ARMDisassembler::AddThumbPredicate(MCInst &MI) const { (isVectorPredicable(MI) && ITBlock.instrInITBlock())) S = SoftFail; - // If we're in an IT block, base the predicate on that. Otherwise, - // assume a predicate of AL. - unsigned CC = ARMCC::AL; - if (ITBlock.instrInITBlock()) { - CC = ITBlock.getITCC(); + if (ITBlock.instrInITBlock()) ITBlock.advanceITState(); - } else if (VPTBlock.instrInVPTBlock()) { + else if (VPTBlock.instrInVPTBlock()) VPTBlock.advanceVPTState(); - } - - const MCInstrDesc &MCID = MCII->get(MI.getOpcode()); - - MCInst::iterator CCI = MI.begin(); - for (unsigned i = 0; i < MCID.NumOperands; ++i, ++CCI) { - if (MCID.operands()[i].isPredicate() || CCI == MI.end()) - break; - } - - if (MCID.isPredicable()) { - CCI = MI.insert(CCI, MCOperand::createImm(CC)); - ++CCI; - if (CC == ARMCC::AL) - MI.insert(CCI, MCOperand::createReg(ARM::NoRegister)); - else - MI.insert(CCI, MCOperand::createReg(ARM::CPSR)); - } else if (CC != ARMCC::AL) { - Check(S, SoftFail); - } return S; } @@ -6307,7 +6369,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, decodeInstruction(DecoderTableThumb16, MI, Insn16, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 2; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); return Result; } @@ -6315,7 +6377,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, STI); if (Result) { Size = 2; - Check(Result, AddThumbPredicate(MI)); + Check(Result, 
checkThumbPredicate(MI)); return Result; } @@ -6329,7 +6391,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, if (MI.getOpcode() == ARM::t2IT && ITBlock.instrInITBlock()) Result = MCDisassembler::SoftFail; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); // If we find an IT instruction, we need to parse its condition // code and mask operands so that we can apply them correctly @@ -6367,7 +6429,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, if (isVPTOpcode(MI.getOpcode()) && VPTBlock.instrInVPTBlock()) Result = MCDisassembler::SoftFail; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); if (isVPTOpcode(MI.getOpcode())) { unsigned Mask = MI.getOperand(0).getImm(); @@ -6381,7 +6443,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, decodeInstruction(DecoderTableThumb32, MI, Insn32, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); return Result; } @@ -6389,7 +6451,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, decodeInstruction(DecoderTableThumb232, MI, Insn32, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); return checkDecodedInstruction(MI, Size, Address, CS, Insn32, Result); } @@ -6428,7 +6490,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); return Result; } } @@ -6442,7 +6504,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - Check(Result, AddThumbPredicate(MI)); + Check(Result, 
checkThumbPredicate(MI)); return Result; } @@ -6475,7 +6537,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size, decodeInstruction(DecoderTable, MI, Insn32, Address, this, STI); if (Result != MCDisassembler::Fail) { Size = 4; - Check(Result, AddThumbPredicate(MI)); + Check(Result, checkThumbPredicate(MI)); return Result; } diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp index ca366edad89ee..060d1f86f6846 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp @@ -54,7 +54,7 @@ void ARMWinCOFFStreamer::emitWindowsUnwindTables() { } void ARMWinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/AVR/AVR.td b/llvm/lib/Target/AVR/AVR.td index 22ffc4a368ad6..f4ee11984cb73 100644 --- a/llvm/lib/Target/AVR/AVR.td +++ b/llvm/lib/Target/AVR/AVR.td @@ -32,6 +32,8 @@ include "AVRRegisterInfo.td" include "AVRInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def AVRInstrInfo : InstrInfo; //===---------------------------------------------------------------------===// diff --git a/llvm/lib/Target/BPF/BPF.td b/llvm/lib/Target/BPF/BPF.td index 436b7eef600e7..50f9793fb29a7 100644 --- a/llvm/lib/Target/BPF/BPF.td +++ b/llvm/lib/Target/BPF/BPF.td @@ -13,6 +13,9 @@ include "BPFCallingConv.td" include "BPFInstrInfo.td" include "GISel/BPFRegisterBanks.td" + +defm : RemapAllTargetPseudoPointerOperands; + def BPFInstrInfo : InstrInfo; class Proc Features> diff --git a/llvm/lib/Target/CSKY/CSKY.td b/llvm/lib/Target/CSKY/CSKY.td index b5df93a9d464c..45ef9441b0a41 100644 --- a/llvm/lib/Target/CSKY/CSKY.td +++ b/llvm/lib/Target/CSKY/CSKY.td @@ -671,6 +671,8 @@ def : CK860V<"ck860fv", NoSchedModel, // Define the CSKY target. 
//===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def CSKYInstrInfo : InstrInfo; diff --git a/llvm/lib/Target/DirectX/DirectX.td b/llvm/lib/Target/DirectX/DirectX.td index 4d1d45b84a683..1717d533d90fa 100644 --- a/llvm/lib/Target/DirectX/DirectX.td +++ b/llvm/lib/Target/DirectX/DirectX.td @@ -22,6 +22,8 @@ include "DXILStubs.td" // DirectX Subtarget features. //===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def DirectXInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Hexagon/Hexagon.td b/llvm/lib/Target/Hexagon/Hexagon.td index ede8463ff644b..17c72c393b432 100644 --- a/llvm/lib/Target/Hexagon/Hexagon.td +++ b/llvm/lib/Target/Hexagon/Hexagon.td @@ -413,6 +413,8 @@ include "HexagonPatternsV65.td" include "HexagonDepMappings.td" include "HexagonIntrinsics.td" +defm : RemapAllTargetPseudoPointerOperands; + def HexagonInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Lanai/Lanai.td b/llvm/lib/Target/Lanai/Lanai.td index c6d949f42047e..9a5422db5feeb 100644 --- a/llvm/lib/Target/Lanai/Lanai.td +++ b/llvm/lib/Target/Lanai/Lanai.td @@ -21,6 +21,8 @@ include "LanaiRegisterInfo.td" include "LanaiCallingConv.td" include "LanaiInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def LanaiInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/LoongArch/LoongArch.td b/llvm/lib/Target/LoongArch/LoongArch.td index 6497ff999f6fa..67f07f0a0370e 100644 --- a/llvm/lib/Target/LoongArch/LoongArch.td +++ b/llvm/lib/Target/LoongArch/LoongArch.td @@ -202,6 +202,8 @@ def : ProcessorModel<"la664", NoSchedModel, [Feature64Bit, // Define the LoongArch target. 
//===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def LoongArchInstrInfo : InstrInfo { let guessInstructionProperties = 0; } diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 3ad5f7fa9e2a7..ba9d0682b26dd 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -352,6 +352,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, setOperationAction(ISD::SSUBSAT, VT, Legal); setOperationAction(ISD::UADDSAT, VT, Legal); setOperationAction(ISD::USUBSAT, VT, Legal); + setOperationAction(ISD::ROTL, VT, Custom); + setOperationAction(ISD::ROTR, VT, Custom); } for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) setOperationAction(ISD::BITREVERSE, VT, Custom); @@ -440,6 +442,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, setOperationAction(ISD::UADDSAT, VT, Legal); setOperationAction(ISD::USUBSAT, VT, Legal); setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); + setOperationAction(ISD::ROTL, VT, Custom); + setOperationAction(ISD::ROTR, VT, Custom); } for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32}) setOperationAction(ISD::BITREVERSE, VT, Custom); @@ -601,6 +605,9 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op, return lowerBF16_TO_FP(Op, DAG); case ISD::VECREDUCE_ADD: return lowerVECREDUCE_ADD(Op, DAG); + case ISD::ROTL: + case ISD::ROTR: + return lowerRotate(Op, DAG); case ISD::VECREDUCE_AND: case ISD::VECREDUCE_OR: case ISD::VECREDUCE_XOR: @@ -827,6 +834,58 @@ SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op, return Op; } +SDValue LoongArchTargetLowering::lowerRotate(SDValue Op, + SelectionDAG &DAG) const { + MVT VT = Op.getSimpleValueType(); + assert(VT.isVector() && "Unexpected type"); + + SDLoc DL(Op); + SDValue R = Op.getOperand(0); + SDValue Amt = Op.getOperand(1); + 
unsigned Opcode = Op.getOpcode(); + unsigned EltSizeInBits = VT.getScalarSizeInBits(); + + auto checkCstSplat = [](SDValue V, APInt &CstSplatValue) { + if (V.getOpcode() != ISD::BUILD_VECTOR) + return false; + if (SDValue SplatValue = + cast(V.getNode())->getSplatValue()) { + if (auto *C = dyn_cast(SplatValue)) { + CstSplatValue = C->getAPIntValue(); + return true; + } + } + return false; + }; + + // Check for constant splat rotation amount. + APInt CstSplatValue; + bool IsCstSplat = checkCstSplat(Amt, CstSplatValue); + bool isROTL = Opcode == ISD::ROTL; + + // Check for splat rotate by zero. + if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0) + return R; + + // LoongArch targets always prefer ISD::ROTR. + if (isROTL) { + SDValue Zero = DAG.getConstant(0, DL, VT); + return DAG.getNode(ISD::ROTR, DL, VT, R, + DAG.getNode(ISD::SUB, DL, VT, Zero, Amt)); + } + + // Rotate by a immediate. + if (IsCstSplat) { + // ISD::ROTR: Attemp to rotate by a positive immediate. + SDValue Bits = DAG.getConstant(EltSizeInBits, DL, VT); + if (SDValue Urem = + DAG.FoldConstantArithmetic(ISD::UREM, DL, VT, {Amt, Bits})) + return DAG.getNode(Opcode, DL, VT, R, Urem); + } + + return Op; +} + // Return true if Val is equal to (setcc LHS, RHS, CC). // Return false if Val is the inverse of (setcc LHS, RHS, CC). // Otherwise, return std::nullopt. 
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h index 232ac6092149d..0c09fb6afd2d1 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h @@ -239,6 +239,7 @@ class LoongArchTargetLowering : public TargetLowering { SDValue lowerVECREDUCE_ADD(SDValue Op, SelectionDAG &DAG) const; SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const; SDValue lowerConstantFP(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerRotate(SDValue Op, SelectionDAG &DAG) const; bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override; diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td index 3ccbd43a40062..d6af093411c3a 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td @@ -1447,6 +1447,11 @@ defm : PatXrXr; defm : PatShiftXrXr; defm : PatShiftXrSplatUimm; +// XVROTR[I]_{B/H/W/D} +defm : PatXrXr; +defm : PatShiftXrXr; +defm : PatShiftXrSplatUimm; + // XVCLZ_{B/H/W/D} defm : PatXr; diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td index 49c6521b3193c..43ad3819029cf 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td @@ -1657,6 +1657,11 @@ defm : PatVrVr; defm : PatShiftVrVr; defm : PatShiftVrSplatUimm; +// VROTR[I]_{B/H/W/D} +defm : PatVrVr; +defm : PatShiftVrVr; +defm : PatShiftVrSplatUimm; + // VCLZ_{B/H/W/D} defm : PatVr; diff --git a/llvm/lib/Target/M68k/M68k.td b/llvm/lib/Target/M68k/M68k.td index dab66d1022955..dfa44a423ae25 100644 --- a/llvm/lib/Target/M68k/M68k.td +++ b/llvm/lib/Target/M68k/M68k.td @@ -95,6 +95,8 @@ include "GISel/M68kRegisterBanks.td" include "M68kInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def M68kInstrInfo : InstrInfo; 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/MSP430/MSP430.td b/llvm/lib/Target/MSP430/MSP430.td index 38aa30fcf4dd1..cb3949838f6f2 100644 --- a/llvm/lib/Target/MSP430/MSP430.td +++ b/llvm/lib/Target/MSP430/MSP430.td @@ -61,6 +61,8 @@ include "MSP430CallingConv.td" include "MSP430InstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def MSP430InstrInfo : InstrInfo; //===---------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Mips/Mips.td b/llvm/lib/Target/Mips/Mips.td index e18388c179108..6c8d177093c76 100644 --- a/llvm/lib/Target/Mips/Mips.td +++ b/llvm/lib/Target/Mips/Mips.td @@ -244,6 +244,8 @@ include "MipsScheduleI6400.td" include "MipsScheduleP5600.td" include "MipsScheduleGeneric.td" +defm : RemapAllTargetPseudoPointerOperands; + def MipsInstrInfo : InstrInfo { } diff --git a/llvm/lib/Target/NVPTX/NVPTX.td b/llvm/lib/Target/NVPTX/NVPTX.td index 31c117a8c0fee..d41a43de95098 100644 --- a/llvm/lib/Target/NVPTX/NVPTX.td +++ b/llvm/lib/Target/NVPTX/NVPTX.td @@ -150,6 +150,16 @@ def : Proc<"sm_121", [SM121, PTX88]>; def : Proc<"sm_121a", [SM121a, PTX88]>; def : Proc<"sm_121f", [SM121f, PTX88]>; + +def Is64Bit : Predicate<"Subtarget->getTargetTriple().getArch() == Triple::nvptx64">; +def NVPTX64 : HwMode<[Is64Bit]>; + +def nvptx_ptr_rc : RegClassByHwMode< + [DefaultMode, NVPTX64], + [B32, B64]>; + +defm : RemapAllTargetPseudoPointerOperands; + def NVPTXInstrInfo : InstrInfo { } diff --git a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp index 561a9c51b9cc2..b07f95018ca90 100644 --- a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp +++ b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp @@ -365,6 +365,10 @@ struct PPCOperand : public MCParsedAsmOperand { bool isS16ImmX4() const { return isExtImm<16>(/*Signed*/ true, 4); } bool isS16ImmX16() const { return isExtImm<16>(/*Signed*/ true, 
16); } bool isS17Imm() const { return isExtImm<17>(/*Signed*/ true, 1); } + bool isS32Imm() const { + // TODO: Is ContextImmediate needed? + return Kind == Expression || isSImm<32>(); + } bool isS34Imm() const { // Once the PC-Rel ABI is finalized, evaluate whether a 34-bit // ContextImmediate is needed. diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp index 04b886ae74993..558351b515a2e 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp @@ -47,6 +47,9 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) { case PPC::fixup_ppc_half16ds: case PPC::fixup_ppc_half16dq: return Value & 0xfffc; + case PPC::fixup_ppc_pcrel32: + case PPC::fixup_ppc_imm32: + return Value & 0xffffffff; case PPC::fixup_ppc_pcrel34: case PPC::fixup_ppc_imm34: return Value & 0x3ffffffff; @@ -71,6 +74,8 @@ static unsigned getFixupKindNumBytes(unsigned Kind) { case PPC::fixup_ppc_br24abs: case PPC::fixup_ppc_br24_notoc: return 4; + case PPC::fixup_ppc_pcrel32: + case PPC::fixup_ppc_imm32: case PPC::fixup_ppc_pcrel34: case PPC::fixup_ppc_imm34: case FK_Data_8: @@ -154,6 +159,8 @@ MCFixupKindInfo PPCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { {"fixup_ppc_brcond14abs", 16, 14, 0}, {"fixup_ppc_half16", 0, 16, 0}, {"fixup_ppc_half16ds", 0, 14, 0}, + {"fixup_ppc_pcrel32", 0, 32, 0}, + {"fixup_ppc_imm32", 0, 32, 0}, {"fixup_ppc_pcrel34", 0, 34, 0}, {"fixup_ppc_imm34", 0, 34, 0}, {"fixup_ppc_nofixup", 0, 0, 0}}; @@ -166,6 +173,8 @@ MCFixupKindInfo PPCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { {"fixup_ppc_brcond14abs", 2, 14, 0}, {"fixup_ppc_half16", 0, 16, 0}, {"fixup_ppc_half16ds", 2, 14, 0}, + {"fixup_ppc_pcrel32", 0, 32, 0}, + {"fixup_ppc_imm32", 0, 32, 0}, {"fixup_ppc_pcrel34", 0, 34, 0}, {"fixup_ppc_imm34", 0, 34, 0}, {"fixup_ppc_nofixup", 0, 0, 0}}; diff --git 
a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h index df0c666f5b113..4164b697649cd 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h @@ -40,6 +40,12 @@ enum Fixups { /// instrs like 'std'. fixup_ppc_half16ds, + // A 32-bit fixup corresponding to PC-relative paddis. + fixup_ppc_pcrel32, + + // A 32-bit fixup corresponding to Non-PC-relative paddis. + fixup_ppc_imm32, + // A 34-bit fixup corresponding to PC-relative paddi. fixup_ppc_pcrel34, diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp index a2f981e861511..46d6093be3c17 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp @@ -430,6 +430,17 @@ void PPCInstPrinter::printS16ImmOperand(const MCInst *MI, unsigned OpNo, printOperand(MI, OpNo, STI, O); } +void PPCInstPrinter::printS32ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNo).isImm()) { + long long Value = MI->getOperand(OpNo).getImm(); + assert(isInt<32>(Value) && "Invalid s32imm argument!"); + O << (long long)Value; + } else + printOperand(MI, OpNo, STI, O); +} + void PPCInstPrinter::printS34ImmOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h index 01ff6255f2a03..2fbd06c5a96cf 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h @@ -80,6 +80,8 @@ class PPCInstPrinter : public MCInstPrinter { const MCSubtargetInfo &STI, raw_ostream &O); void printS16ImmOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O); + void printS32ImmOperand(const MCInst 
*MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); void printS34ImmOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O); void printU16ImmOperand(const MCInst *MI, unsigned OpNo, diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index 81d8e94b660d7..b28304b07e1a3 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -46,6 +46,7 @@ static void addFixup(SmallVectorImpl &Fixups, uint32_t Offset, case PPC::fixup_ppc_br24_notoc: case PPC::fixup_ppc_brcond14: case PPC::fixup_ppc_pcrel34: + case PPC::fixup_ppc_pcrel32: PCRel = true; } Fixups.push_back(MCFixup::create(Offset, Value, Kind, PCRel)); diff --git a/llvm/lib/Target/PowerPC/PPC.td b/llvm/lib/Target/PowerPC/PPC.td index 5d9ec4adf45c7..dc00aebe311f9 100644 --- a/llvm/lib/Target/PowerPC/PPC.td +++ b/llvm/lib/Target/PowerPC/PPC.td @@ -820,6 +820,8 @@ def PPCAsmParserVariant : AsmParserVariant { string BreakCharacters = "."; } +defm : RemapAllTargetPseudoPointerOperands; + def PPC : Target { // Information about the instructions. let InstructionSet = PPCInstrInfo; diff --git a/llvm/lib/Target/PowerPC/PPCInstrFuture.td b/llvm/lib/Target/PowerPC/PPCInstrFuture.td index e417ffe6d3677..39e6f4f139c11 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrFuture.td +++ b/llvm/lib/Target/PowerPC/PPCInstrFuture.td @@ -312,9 +312,41 @@ class 8RR_XX4Form_XTABC6_P opcode, dag OOL, dag IOL, string asmstr, let Inst{63} = XT{5}; } +class MLS_DForm_R_SI32_RTA5 opcode, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list pattern> + : PI<1, opcode, OOL, IOL, asmstr, itin> { + bits<5> RT; + bits<5> RA; + bits<32> SI; + + let Pattern = pattern; + + // The prefix. + let Inst{6...7} = 2; + let Inst{8} = 0; + let Inst{11} = PCRel; + let Inst{16...31} = SI{31...16}; + + // The instruction. 
+ let Inst{38...42} = RT; + let Inst{43...47} = RA; + let Inst{48...63} = SI{15...0}; +} + +multiclass MLS_DForm_R_SI32_RTA5_p opcode, dag OOL, dag IOL, + dag PCRel_IOL, string asmstr, + InstrItinClass itin> { + def NAME : MLS_DForm_R_SI32_RTA5; + def pc : MLS_DForm_R_SI32_RTA5, + isPCRel; +} + //-------------------------- Instruction definitions -------------------------// // Predicate combinations available: // [IsISAFuture] +// [IsISAFuture, PrefixInstrs] // [HasVSX, IsISAFuture] // [HasVSX, PrefixInstrs, IsISAFuture] @@ -346,6 +378,18 @@ let Predicates = [IsISAFuture] in { } } +let Predicates = [IsISAFuture, PrefixInstrs] in { + defm PADDIS : MLS_DForm_R_SI32_RTA5_p<15, (outs gprc:$RT), + (ins gprc_nor0:$RA, s32imm:$SI), + (ins immZero:$RA, s32imm_pcrel:$SI), + "paddis $RT, $RA, $SI", IIC_LdStLFD>; + let Interpretation64Bit = 1, isCodeGenOnly = 1 in + defm PADDIS8 : MLS_DForm_R_SI32_RTA5_p<15, (outs g8rc:$RT), + (ins g8rc_nox0:$RA, s32imm:$SI), + (ins immZero:$RA, s32imm_pcrel:$SI), + "paddis $RT, $RA, $SI", IIC_LdStLFD>; +} + let Predicates = [HasVSX, IsISAFuture] in { let mayLoad = 1 in { def LXVRL : XX1Form_memOp<31, 525, (outs vsrc:$XT), diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td index 65d0484805b95..e23914a050359 100644 --- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td +++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td @@ -808,6 +808,25 @@ def s17imm64 : Operand { let DecoderMethod = "decodeSImmOperand<16>"; let OperandType = "OPERAND_IMMEDIATE"; } +def PPCS32ImmAsmOperand : AsmOperandClass { + let Name = "S32Imm"; + let PredicateMethod = "isS32Imm"; + let RenderMethod = "addImmOperands"; +} +def s32imm : Operand { + let PrintMethod = "printS32ImmOperand"; + let EncoderMethod = "getImmEncoding"; + let ParserMatchClass = PPCS32ImmAsmOperand; + let DecoderMethod = "decodeSImmOperand<32>"; + let OperandType = "OPERAND_IMMEDIATE"; +} +def s32imm_pcrel : Operand { + let PrintMethod = "printS32ImmOperand"; + 
let EncoderMethod = "getImmEncoding"; + let ParserMatchClass = PPCS32ImmAsmOperand; + let DecoderMethod = "decodeSImmOperand<32>"; + let OperandType = "OPERAND_IMMEDIATE"; +} def PPCS34ImmAsmOperand : AsmOperandClass { let Name = "S34Imm"; let PredicateMethod = "isS34Imm"; @@ -904,6 +923,10 @@ def PPCRegGxRCNoR0Operand : AsmOperandClass { let Name = "RegGxRCNoR0"; let PredicateMethod = "isRegNumber"; } +def ppc_ptr_rc : RegClassByHwMode< + [PPC32, PPC64], + [GPRC, G8RC]>; + def ptr_rc_nor0_by_hwmode : RegClassByHwMode< [PPC32, PPC64], [GPRC_NOR0, G8RC_NOX0]>; diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index 75ce1b144a2e7..9bb3724c96c11 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -4082,6 +4082,9 @@ bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, return false; } + case RISCV::PseudoCV_ELW: + emitLoadStoreSymbol(Inst, RISCV::CV_ELW, IDLoc, Out, /*HasTmpReg=*/false); + return false; } emitToStreamer(Out, Inst); diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td index b24d8637cb27f..f6f82fd9bb55f 100644 --- a/llvm/lib/Target/RISCV/RISCV.td +++ b/llvm/lib/Target/RISCV/RISCV.td @@ -96,6 +96,8 @@ def RISCVAsmWriter : AsmWriter { int PassSubtarget = 1; } +defm : RemapAllTargetPseudoPointerOperands; + def RISCV : Target { let InstructionSet = RISCVInstrInfo; let AssemblyParsers = [RISCVAsmParser]; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 5025122db3681..7cf6f203fda89 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -1867,6 +1867,43 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { CurDAG->RemoveDeadNode(Node); return; } + case RISCVISD::PPACK_DH: { + assert(Subtarget->enablePExtCodeGen() && Subtarget->isRV32()); + + SDValue Val0 = Node->getOperand(0); + 
SDValue Val1 = Node->getOperand(1); + SDValue Val2 = Node->getOperand(2); + SDValue Val3 = Node->getOperand(3); + + SDValue Ops[] = { + CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32), Val0, + CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32), Val2, + CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)}; + SDValue RegPair0 = + SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, + MVT::Untyped, Ops), + 0); + SDValue Ops1[] = { + CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32), Val1, + CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32), Val3, + CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)}; + SDValue RegPair1 = + SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, + MVT::Untyped, Ops1), + 0); + + MachineSDNode *PackDH = CurDAG->getMachineNode( + RISCV::PPACK_DH, DL, MVT::Untyped, {RegPair0, RegPair1}); + + SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL, + MVT::i32, SDValue(PackDH, 0)); + SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL, + MVT::i32, SDValue(PackDH, 0)); + ReplaceUses(SDValue(Node, 0), Lo); + ReplaceUses(SDValue(Node, 1), Hi); + CurDAG->RemoveDeadNode(Node); + return; + } case ISD::INTRINSIC_WO_CHAIN: { unsigned IntNo = Node->getConstantOperandVal(0); switch (IntNo) { @@ -2696,7 +2733,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case ISD::SCALAR_TO_VECTOR: if (Subtarget->enablePExtCodeGen()) { MVT SrcVT = Node->getOperand(0).getSimpleValueType(); - if (VT == MVT::v2i32 && SrcVT == MVT::i64) { + if ((VT == MVT::v2i32 && SrcVT == MVT::i64) || + (VT == MVT::v4i8 && SrcVT == MVT::i32)) { ReplaceUses(SDValue(Node, 0), Node->getOperand(0)); CurDAG->RemoveDeadNode(Node); return; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 3b250d7d9ad1f..be53f51afe79f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ 
b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -519,11 +519,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand); } else { VTs.append({MVT::v2i16, MVT::v4i8}); + setOperationAction(ISD::BUILD_VECTOR, MVT::v4i8, Custom); } setOperationAction(ISD::UADDSAT, VTs, Legal); setOperationAction(ISD::SADDSAT, VTs, Legal); setOperationAction(ISD::USUBSAT, VTs, Legal); setOperationAction(ISD::SSUBSAT, VTs, Legal); + setOperationAction(ISD::SSHLSAT, VTs, Legal); setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU}, VTs, Legal); setOperationAction({ISD::ABDS, ISD::ABDU}, VTs, Legal); setOperationAction(ISD::SPLAT_VECTOR, VTs, Legal); @@ -4434,6 +4436,33 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, SDLoc DL(Op); + if (Subtarget.isRV32() && Subtarget.enablePExtCodeGen()) { + if (VT != MVT::v4i8) + return SDValue(); + + // <4 x i8> BUILD_VECTOR a, b, c, d -> PACK(PPACK.DH pair(a, b), pair(c, d)) + SDValue Val0 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i8, Op->getOperand(0)); + SDValue Val1 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i8, Op->getOperand(1)); + SDValue Val2 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i8, Op->getOperand(2)); + SDValue Val3 = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i8, Op->getOperand(3)); + SDValue PackDH = + DAG.getNode(RISCVISD::PPACK_DH, DL, {MVT::v2i16, MVT::v2i16}, + {Val0, Val1, Val2, Val3}); + + return DAG.getNode( + ISD::BITCAST, DL, MVT::v4i8, + SDValue( + DAG.getMachineNode( + RISCV::PACK, DL, MVT::i32, + {DAG.getNode(ISD::BITCAST, DL, MVT::i32, PackDH.getValue(0)), + DAG.getNode(ISD::BITCAST, DL, MVT::i32, PackDH.getValue(1))}), + 0)); + } + // Proper support for f16 requires Zvfh. bf16 always requires special // handling. We need to cast the scalar to integer and create an integer // build_vector. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td index 51339d66f6de1..599358368594f 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td @@ -24,6 +24,13 @@ def SImm8UnsignedAsmOperand : SImmAsmOperand<8, "Unsigned"> { let RenderMethod = "addSImm8UnsignedOperands"; } +// (<2 x i16>, <2 x i16>) PPACK_DH (<4 x i8>, <4 x i8>, <4 x i8>, <4 x i8>) +def SDT_RISCVPPackDH + : SDTypeProfile<2, 4, [SDTCisVT<0, v2i16>, SDTCisSameAs<0, 1>, + SDTCisVT<2, v4i8>, SDTCisSameAs<0, 3>, + SDTCisSameAs<0, 4>, SDTCisSameAs<0, 5>]>; +def riscv_ppack_dh : RVSDNode<"PPACK_DH", SDT_RISCVPPackDH>; + // A 8-bit signed immediate allowing range [-128, 255] // but represented as [-128, 127]. def simm8_unsigned : RISCVOp, ImmLeaf(Imm);"> { @@ -1513,6 +1520,17 @@ let Predicates = [HasStdExtP] in { def: Pat<(XLenVecI16VT (abds GPR:$rs1, GPR:$rs2)), (PDIF_H GPR:$rs1, GPR:$rs2)>; def: Pat<(XLenVecI16VT (abdu GPR:$rs1, GPR:$rs2)), (PDIFU_H GPR:$rs1, GPR:$rs2)>; + // 8-bit logical shift left patterns + def: Pat<(XLenVecI8VT (shl GPR:$rs1, (XLenVecI8VT (splat_vector uimm3:$shamt)))), + (PSLLI_B GPR:$rs1, uimm3:$shamt)>; + + // 16-bit logical shift left patterns + def: Pat<(XLenVecI16VT (shl GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))), + (PSLLI_H GPR:$rs1, uimm4:$shamt)>; + + // 16-bit signed saturation shift left patterns + def: Pat<(XLenVecI16VT (sshlsat GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))), + (PSSLAI_H GPR:$rs1, uimm4:$shamt)>; // 8-bit PLI SD node pattern def: Pat<(XLenVecI8VT (splat_vector simm8_unsigned:$imm8)), (PLI_B simm8_unsigned:$imm8)>; @@ -1530,6 +1548,10 @@ let Predicates = [HasStdExtP, IsRV32] in { def : StPat; def : LdPat; def : LdPat; + + // Build vector patterns + def : Pat<(v2i16 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b))), + (PACK GPR:$a, GPR:$b)>; } // Predicates = [HasStdExtP, IsRV32] let Predicates = [HasStdExtP, IsRV64] in { @@ -1559,6 +1581,14 @@ let 
Predicates = [HasStdExtP, IsRV64] in { // splat pattern def: Pat<(v2i32 (splat_vector (XLenVT GPR:$rs2))), (PADD_WS (XLenVT X0), GPR:$rs2)>; + // 32-bit logical shift left patterns + def: Pat<(v2i32 (shl GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))), + (PSLLI_W GPR:$rs1, uimm5:$shamt)>; + + // 32-bit signed saturation shift left patterns + def: Pat<(v2i32 (sshlsat GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))), + (PSSLAI_W GPR:$rs1, uimm5:$shamt)>; + // Load/Store patterns def : StPat; def : StPat; @@ -1566,4 +1596,29 @@ let Predicates = [HasStdExtP, IsRV64] in { def : LdPat; def : LdPat; def : LdPat; + + // Build vector patterns + def : Pat<(v8i8 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b), + (XLenVT GPR:$c), (XLenVT GPR:$d), + (XLenVT undef), (XLenVT undef), + (XLenVT undef), (XLenVT undef))), + (PPACK_W (PPACK_H GPR:$a, GPR:$b), (PPACK_H GPR:$c, GPR:$d))>; + + def : Pat<(v8i8 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b), + (XLenVT GPR:$c), (XLenVT GPR:$d), + (XLenVT GPR:$e), (XLenVT GPR:$f), + (XLenVT GPR:$g), (XLenVT GPR:$h))), + (PACK(PPACK_W (PPACK_H GPR:$a, GPR:$b), (PPACK_H GPR:$c, GPR:$d)), + (PPACK_W (PPACK_H GPR:$e, GPR:$f), (PPACK_H GPR:$g, GPR:$h)))>; + + def : Pat<(v4i16 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b), + (XLenVT undef), (XLenVT undef))), + (PPACK_W GPR:$a, GPR:$b)>; + + def : Pat<(v4i16 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b), + (XLenVT GPR:$c), (XLenVT GPR:$d))), + (PACK (PPACK_W GPR:$a, GPR:$b), (PPACK_W GPR:$c, GPR:$d))>; + + def : Pat<(v2i32 (build_vector (XLenVT GPR:$a), (XLenVT GPR:$b))), + (PACK GPR:$a, GPR:$b)>; } // Predicates = [HasStdExtP, IsRV64] diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td index b683e895c31c0..bbe3baef36bab 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td @@ -838,7 +838,6 @@ def : Pat<(fpextend (bf16 FPR16:$rs)), (NDS_FCVT_S_BF16 (bf16 FPR16:$rs))>; def : Pat<(bf16 
(fpround FPR32:$rs)), (NDS_FCVT_BF16_S FPR32:$rs)>; -} // Predicates = [HasVendorXAndesBFHCvt] let isCodeGenOnly = 1 in { def NDS_FMV_BF16_X : FPUnaryOp_r<0b1111000, 0b00000, 0b000, FPR16, GPR, "fmv.w.x">, @@ -847,7 +846,6 @@ def NDS_FMV_X_BF16 : FPUnaryOp_r<0b1110000, 0b00000, 0b000, GPR, FPR16, "fmv.x.w Sched<[WriteFMovF32ToI32, ReadFMovF32ToI32]>; } -let Predicates = [HasVendorXAndesBFHCvt] in { def : Pat<(riscv_nds_fmv_bf16_x GPR:$src), (NDS_FMV_BF16_X GPR:$src)>; def : Pat<(riscv_nds_fmv_x_anyextbf16 (bf16 FPR16:$src)), (NDS_FMV_X_BF16 (bf16 FPR16:$src))>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td index aa8f1a1108b6b..7abc616f03141 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td @@ -633,8 +633,9 @@ let Predicates = [HasVendorXCVmem, IsRV32] in { def CV_SW_rr : CVStore_rr<0b011, 0b0010110, "cv.sw">; } -let Predicates = [HasVendorXCVelw, IsRV32], hasSideEffects = 0, +let Predicates = [HasVendorXCVelw, IsRV32], hasSideEffects = 1, mayLoad = 1, mayStore = 0 in { + def PseudoCV_ELW : PseudoLoad<"cv.elw">; // Event load def CV_ELW : CVLoad_ri<0b011, "cv.elw">; } @@ -706,6 +707,12 @@ let Predicates = [HasVendorXCVmem, IsRV32], AddedComplexity = 1 in { def : CVStrrPat; } +let Predicates = [HasVendorXCVelw, IsRV32] in { + def : Pat<(int_riscv_cv_elw_elw (XLenVT GPR:$rs1)), (PseudoCV_ELW GPR:$rs1)>; + def : Pat<(int_riscv_cv_elw_elw (AddrRegImm (XLenVT GPR:$rs1), simm12_lo:$imm12)), + (CV_ELW GPR:$rs1, simm12_lo:$imm12)>; +} + multiclass PatCoreVBitManip { def : PatGprGpr("CV_" # NAME # "R")>; def : Pat<(intr GPR:$rs1, cv_uimm10:$imm), diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp index a5aef4bea46ab..d802d19a0edcb 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -869,6 +869,7 @@ bool RISCVRegisterInfo::getRegAllocationHints( unsigned 
HintType = Hint.first; Register Partner = Hint.second; + MCRegister TargetReg; if (HintType == RISCVRI::RegPairEven || HintType == RISCVRI::RegPairOdd) { // Check if we want the even or odd register of a consecutive pair bool WantOdd = (HintType == RISCVRI::RegPairOdd); @@ -877,7 +878,7 @@ bool RISCVRegisterInfo::getRegAllocationHints( if (Partner.isVirtual() && VRM && VRM->hasPhys(Partner)) { MCRegister PartnerPhys = VRM->getPhys(Partner); // Calculate the exact register we need for consecutive pairing - MCRegister TargetReg = PartnerPhys.id() + (WantOdd ? 1 : -1); + TargetReg = PartnerPhys.id() + (WantOdd ? 1 : -1); // Verify it's valid and available if (RISCV::GPRRegClass.contains(TargetReg) && @@ -888,7 +889,8 @@ bool RISCVRegisterInfo::getRegAllocationHints( // Second priority: Try to find consecutive register pairs in the allocation // order for (MCPhysReg PhysReg : Order) { - if (!PhysReg) + // Don't add the hint if we already added above. + if (TargetReg == PhysReg) continue; unsigned RegNum = getEncodingValue(PhysReg); diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 1a1a93a9cb178..4788a428d7e64 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -1146,15 +1146,20 @@ InstructionCost RISCVTTIImpl::getGatherScatterOpCost( } InstructionCost RISCVTTIImpl::getExpandCompressMemoryOpCost( - unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, - TTI::TargetCostKind CostKind, const Instruction *I) const { + const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const { + unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload + ? 
Instruction::Load + : Instruction::Store; + Type *DataTy = MICA.getDataType(); + bool VariableMask = MICA.getVariableMask(); + Align Alignment = MICA.getAlignment(); bool IsLegal = (Opcode == Instruction::Store && isLegalMaskedCompressStore(DataTy, Alignment)) || (Opcode == Instruction::Load && isLegalMaskedExpandLoad(DataTy, Alignment)); if (!IsLegal || CostKind != TTI::TCK_RecipThroughput) - return BaseT::getExpandCompressMemoryOpCost(Opcode, DataTy, VariableMask, - Alignment, CostKind, I); + return BaseT::getExpandCompressMemoryOpCost(MICA, CostKind); // Example compressstore sequence: // vsetivli zero, 8, e32, m2, ta, ma (ignored) // vcompress.vm v10, v8, v0 diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 484c4791390ac..5efa330b3ad71 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -197,9 +197,8 @@ class RISCVTTIImpl final : public BasicTTIImplBase { const Instruction *I) const override; InstructionCost - getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src, bool VariableMask, - Align Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) const override; + getExpandCompressMemoryOpCost(const MemIntrinsicCostAttributes &MICA, + TTI::TargetCostKind CostKind) const override; InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, diff --git a/llvm/lib/Target/SPIRV/SPIRV.td b/llvm/lib/Target/SPIRV/SPIRV.td index 39a4131c7f1bd..cc9c7913af427 100644 --- a/llvm/lib/Target/SPIRV/SPIRV.td +++ b/llvm/lib/Target/SPIRV/SPIRV.td @@ -14,6 +14,8 @@ include "SPIRVInstrInfo.td" include "SPIRVCombine.td" include "SPIRVBuiltins.td" +defm : RemapAllTargetPseudoPointerOperands; + def SPIRVInstrInfo : InstrInfo; class Proc Features> diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index b2cbdb2ad7375..709f49b0fecc1 100644 
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -3373,6 +3373,8 @@ SPIRVType *lowerBuiltinType(const Type *OpaqueType, TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR); } else if (Name == "spirv.VulkanBuffer") { TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR); + } else if (Name == "spirv.Padding") { + TargetType = GR->getOrCreatePaddingType(MIRBuilder); } else if (Name == "spirv.Layout") { TargetType = getLayoutType(BuiltinType, MIRBuilder, GR); } else { diff --git a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp index 329774df554f4..227d8716d974a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp @@ -79,15 +79,20 @@ static bool replaceCBufferAccesses(Module &M) { // The handle definition should dominate all uses of the cbuffer members. // We'll insert our getpointer calls right after it. IRBuilder<> Builder(HandleDef->getNextNode()); + auto *HandleTy = cast(Mapping.Handle->getValueType()); + auto *LayoutTy = cast(HandleTy->getTypeParameter(0)); + const StructLayout *SL = M.getDataLayout().getStructLayout(LayoutTy); - for (uint32_t Index = 0; Index < Mapping.Members.size(); ++Index) { - GlobalVariable *MemberGV = Mapping.Members[Index].GV; + for (const hlsl::CBufferMember &Member : Mapping.Members) { + GlobalVariable *MemberGV = Member.GV; if (MemberGV->use_empty()) { continue; } + uint32_t IndexInStruct = SL->getElementContainingOffset(Member.Offset); + // Create the getpointer intrinsic call. 
- Value *IndexVal = Builder.getInt32(Index); + Value *IndexVal = Builder.getInt32(IndexInStruct); Type *PtrType = MemberGV->getType(); Value *GetPointerCall = Builder.CreateIntrinsic( PtrType, Intrinsic::spv_resource_getpointer, {HandleDef, IndexVal}); diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp index ac09b937a584a..d394b3ac243a9 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp @@ -248,5 +248,11 @@ SPIRVExtensionsParser::getValidExtensions(const Triple &TT) { R.insert(ExtensionEnum); } + if (TT.getVendor() == Triple::AMD) { + // AMD uses the translator to recover LLVM-IR from SPIRV. Currently, the + // translator doesn't implement the SPV_KHR_float_controls2 extension. + R.erase(SPIRV::Extension::SPV_KHR_float_controls2); + } + return R; } diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index 8e14fb03127fc..eea49bfdaf04b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -841,6 +841,7 @@ Type *SPIRVEmitIntrinsics::deduceElementTypeHelper( uint32_t Index = cast(II->getOperand(1))->getZExtValue(); Ty = cast(Ty)->getElementType(Index); } + Ty = reconstitutePeeledArrayType(Ty); } else { llvm_unreachable("Unknown handle type for spv_resource_getpointer."); } @@ -1569,16 +1570,57 @@ Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) { return BrI; } -Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) { - if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) && - TM->getSubtargetImpl()->isLogicalSPIRV()) { - Instruction *Result = buildLogicalAccessChainFromGEP(I); - if (Result) - return Result; +static bool isFirstIndexZero(const GetElementPtrInst *GEP) { + if (GEP->getNumIndices() == 0) + return false; + if (const auto *CI = dyn_cast(GEP->getOperand(1))) { + return 
CI->getZExtValue() == 0; } + return false; +} +Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) { IRBuilder<> B(I.getParent()); B.SetInsertPoint(&I); + + if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(&I)) { + // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first + // index of the GEP is not 0, then we need to try to adjust it. + // + // If the GEP is doing byte addressing, try to rebuild the full access chain + // from the type of the pointer. + if (I.getSourceElementType() == + IntegerType::getInt8Ty(CurrF->getContext())) { + return buildLogicalAccessChainFromGEP(I); + } + + // Look for the array-to-pointer decay. If this is the pattern + // we can adjust the types, and prepend a 0 to the indices. + Value *PtrOp = I.getPointerOperand(); + Type *SrcElemTy = I.getSourceElementType(); + Type *DeducedPointeeTy = deduceElementType(PtrOp, true); + + if (auto *ArrTy = dyn_cast(DeducedPointeeTy)) { + if (ArrTy->getElementType() == SrcElemTy) { + SmallVector NewIndices; + Type *FirstIdxType = I.getOperand(1)->getType(); + NewIndices.push_back(ConstantInt::get(FirstIdxType, 0)); + for (Value *Idx : I.indices()) + NewIndices.push_back(Idx); + + SmallVector Types = {I.getType(), I.getPointerOperandType()}; + SmallVector Args; + Args.push_back(B.getInt1(I.isInBounds())); + Args.push_back(I.getPointerOperand()); + Args.append(NewIndices.begin(), NewIndices.end()); + + auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); + replaceAllUsesWithAndErase(B, &I, NewI); + return NewI; + } + } + } + SmallVector Types = {I.getType(), I.getOperand(0)->getType()}; SmallVector Args; Args.push_back(B.getInt1(I.isInBounds())); @@ -1772,16 +1814,12 @@ void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I, Value *Pointer = GEPI->getPointerOperand(); Type *OpTy = nullptr; - // Knowing the accessed type is mandatory for logical SPIR-V. 
Sadly, - // the GEP source element type should not be used for this purpose, and - // the alternative type-scavenging method is not working. - // Physical SPIR-V can work around this, but not logical, hence still - // try to rely on the broken type scavenging for logical. - bool IsRewrittenGEP = - GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext()); - if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) { - Value *Src = getPointerRoot(Pointer); - OpTy = GR->findDeducedElementType(Src); + // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If + // the first index is 0, then we can trivially lower to OpAccessChain. If + // not we need to try to rewrite the GEP. We avoid adding a pointer cast at + // this time, and will rewrite the GEP when visiting it. + if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) { + return; } // In all cases, fall back to the GEP type if type scavenging failed. diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp index bd0c7d15afd12..8b1a09caf907d 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp @@ -22,6 +22,7 @@ #include "llvm/ADT/APInt.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/Function.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsSPIRV.h" @@ -224,14 +225,43 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) { } void SPIRVGlobalRegistry::invalidateMachineInstr(MachineInstr *MI) { - // TODO: - // - review other data structure wrt. possible issues related to removal - // of a machine instruction during instruction selection. 
+ // Other maps that may hold MachineInstr*: + // - VRegToTypeMap: We cannot remove the definitions of `MI` from + // VRegToTypeMap because some calls to invalidateMachineInstr are replacing MI + // with another instruction defining the same register. We expect that if MI + // is a type instruction, and it is still referenced in VRegToTypeMap, then + // those registers are dead or the VRegToTypeMap is out-of-date. We do not + // expect passes to ask for the SPIR-V type of a dead register. If the + // VRegToTypeMap is out-of-date already, then there was an error before. We + // cannot add an assert to verify this because the VRegToTypeMap can be + // out-of-date. + // - FunctionToInstr & FunctionToInstrRev: At this point, we should not be + // deleting functions. No need to update. + // - AliasInstMDMap: Would require a linear search, and the Intel Alias + // instruction are not instructions instruction selection will be able to + // remove. + + const SPIRVSubtarget &ST = MI->getMF()->getSubtarget(); + [[maybe_unused]] const SPIRVInstrInfo *TII = ST.getInstrInfo(); + assert(!TII->isAliasingInstr(*MI) && + "Cannot invalidate aliasing instructions."); + assert(MI->getOpcode() != SPIRV::OpFunction && + "Cannot invalidate OpFunction."); + + if (MI->getOpcode() == SPIRV::OpFunctionCall) { + if (const auto *F = dyn_cast(MI->getOperand(2).getGlobal())) { + auto It = ForwardCalls.find(F); + if (It != ForwardCalls.end()) { + It->second.erase(MI); + if (It->second.empty()) + ForwardCalls.erase(It); + } + } + } + const MachineFunction *MF = MI->getMF(); auto It = LastInsertedTypeMap.find(MF); - if (It == LastInsertedTypeMap.end()) - return; - if (It->second == MI) + if (It != LastInsertedTypeMap.end() && It->second == MI) LastInsertedTypeMap.erase(MF); // remove from the duplicate tracker to avoid incorrect reuse erase(MI); @@ -314,7 +344,7 @@ Register SPIRVGlobalRegistry::createConstFP(const ConstantFP *CF, LLT LLTy = LLT::scalar(BitWidth); Register Res = 
CurMF->getRegInfo().createGenericVirtualRegister(LLTy); CurMF->getRegInfo().setRegClass(Res, &SPIRV::fIDRegClass); - assignFloatTypeToVReg(BitWidth, Res, I, TII); + assignSPIRVTypeToVReg(SpvType, Res, *CurMF); MachineInstr *DepMI = const_cast(SpvType); MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator()); @@ -890,6 +920,17 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct( const StructType *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual, StructOffsetDecorator Decorator, bool EmitIR) { + Type *OriginalElementType = nullptr; + uint64_t TotalSize = 0; + if (matchPeeledArrayPattern(Ty, OriginalElementType, TotalSize)) { + SPIRVType *ElementSPIRVType = findSPIRVType( + OriginalElementType, MIRBuilder, AccQual, + /* ExplicitLayoutRequired= */ Decorator != nullptr, EmitIR); + return getOpTypeArray(TotalSize, ElementSPIRVType, MIRBuilder, + /*ExplicitLayoutRequired=*/Decorator != nullptr, + EmitIR); + } + const SPIRVSubtarget &ST = cast(MIRBuilder.getMF().getSubtarget()); SmallVector FieldTypes; @@ -1414,6 +1455,18 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateVulkanBufferType( return R; } +SPIRVType * +SPIRVGlobalRegistry::getOrCreatePaddingType(MachineIRBuilder &MIRBuilder) { + auto Key = SPIRV::irhandle_padding(); + if (const MachineInstr *MI = findMI(Key, &MIRBuilder.getMF())) + return MI; + auto *T = Type::getInt8Ty(MIRBuilder.getContext()); + SPIRVType *R = getOrCreateSPIRVIntegerType(8, MIRBuilder); + finishCreatingSPIRVType(T, R); + add(Key, R); + return R; +} + SPIRVType *SPIRVGlobalRegistry::getOrCreateLayoutType( MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr) { auto Key = SPIRV::handle(T); diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h index 09c77f0cfd4f5..e5a1a2aa8d70f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h @@ -611,6 +611,8 @@ class SPIRVGlobalRegistry : public 
SPIRVIRMapping { SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr = false); + SPIRVType *getOrCreatePaddingType(MachineIRBuilder &MIRBuilder); + SPIRVType *getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr = false); diff --git a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h index c99d603d340ea..47c7676d5631c 100644 --- a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h +++ b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h @@ -64,6 +64,7 @@ enum SpecialTypeKind { STK_Value, STK_MachineInstr, STK_VkBuffer, + STK_Padding, STK_ExplictLayoutType, STK_Last = -1 }; @@ -149,6 +150,10 @@ inline IRHandle irhandle_vkbuffer(const Type *ElementType, SpecialTypeKind::STK_VkBuffer); } +inline IRHandle irhandle_padding() { + return std::make_tuple(nullptr, 0, SpecialTypeKind::STK_Padding); +} + inline IRHandle irhandle_explict_layout_type(const Type *Ty) { const Type *WrpTy = unifyPtrType(Ty); return irhandle_ptr(WrpTy, Ty->getTypeID(), STK_ExplictLayoutType); diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index d3fc08eb56cb3..2c27289e759eb 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -94,6 +94,8 @@ class SPIRVInstructionSelector : public InstructionSelector { private: void resetVRegsType(MachineFunction &MF); + void removeDeadInstruction(MachineInstr &MI) const; + void removeOpNamesForDeadMI(MachineInstr &MI) const; // tblgen-erated 'select' implementation, used as the initial selector for // the patterns that don't require complex C++. 
@@ -149,6 +151,9 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectStackRestore(MachineInstr &I) const; bool selectMemOperation(Register ResVReg, MachineInstr &I) const; + Register getOrCreateMemSetGlobal(MachineInstr &I) const; + bool selectCopyMemory(MachineInstr &I, Register SrcReg) const; + bool selectCopyMemorySized(MachineInstr &I, Register SrcReg) const; bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, unsigned NewOpcode, @@ -467,6 +472,7 @@ static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, switch (Opcode) { case TargetOpcode::G_CONSTANT: case TargetOpcode::G_FCONSTANT: + case TargetOpcode::G_IMPLICIT_DEF: return true; case TargetOpcode::G_INTRINSIC: case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: @@ -509,22 +515,202 @@ static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) { return false; } +// TODO(168736): We should make this either a flag in tabelgen +// or reduce our dependence on the global registry, so we can remove this +// function. It can easily be missed when new intrinsics are added. + +// Most SPIR-V instrinsics are considered to have side-effects in their tablegen +// definition because they are referenced in the global registry. This is a list +// of intrinsics that have no side effects other than their references in the +// global registry. +static bool intrinsicHasSideEffects(Intrinsic::ID ID) { + switch (ID) { + // This is not an exhaustive list and may need to be updated. 
+ case Intrinsic::spv_all: + case Intrinsic::spv_alloca: + case Intrinsic::spv_any: + case Intrinsic::spv_bitcast: + case Intrinsic::spv_const_composite: + case Intrinsic::spv_cross: + case Intrinsic::spv_degrees: + case Intrinsic::spv_distance: + case Intrinsic::spv_extractelt: + case Intrinsic::spv_extractv: + case Intrinsic::spv_faceforward: + case Intrinsic::spv_fdot: + case Intrinsic::spv_firstbitlow: + case Intrinsic::spv_firstbitshigh: + case Intrinsic::spv_firstbituhigh: + case Intrinsic::spv_frac: + case Intrinsic::spv_gep: + case Intrinsic::spv_global_offset: + case Intrinsic::spv_global_size: + case Intrinsic::spv_group_id: + case Intrinsic::spv_insertelt: + case Intrinsic::spv_insertv: + case Intrinsic::spv_isinf: + case Intrinsic::spv_isnan: + case Intrinsic::spv_lerp: + case Intrinsic::spv_length: + case Intrinsic::spv_normalize: + case Intrinsic::spv_num_subgroups: + case Intrinsic::spv_num_workgroups: + case Intrinsic::spv_ptrcast: + case Intrinsic::spv_radians: + case Intrinsic::spv_reflect: + case Intrinsic::spv_refract: + case Intrinsic::spv_resource_getpointer: + case Intrinsic::spv_resource_handlefrombinding: + case Intrinsic::spv_resource_handlefromimplicitbinding: + case Intrinsic::spv_resource_nonuniformindex: + case Intrinsic::spv_rsqrt: + case Intrinsic::spv_saturate: + case Intrinsic::spv_sdot: + case Intrinsic::spv_sign: + case Intrinsic::spv_smoothstep: + case Intrinsic::spv_step: + case Intrinsic::spv_subgroup_id: + case Intrinsic::spv_subgroup_local_invocation_id: + case Intrinsic::spv_subgroup_max_size: + case Intrinsic::spv_subgroup_size: + case Intrinsic::spv_thread_id: + case Intrinsic::spv_thread_id_in_group: + case Intrinsic::spv_udot: + case Intrinsic::spv_undef: + case Intrinsic::spv_value_md: + case Intrinsic::spv_workgroup_size: + return false; + default: + return true; + } +} + +// TODO(168736): We should make this either a flag in tabelgen +// or reduce our dependence on the global registry, so we can remove this +// 
function. It can easily be missed when new intrinsics are added. +static bool isOpcodeWithNoSideEffects(unsigned Opcode) { + switch (Opcode) { + case SPIRV::OpTypeVoid: + case SPIRV::OpTypeBool: + case SPIRV::OpTypeInt: + case SPIRV::OpTypeFloat: + case SPIRV::OpTypeVector: + case SPIRV::OpTypeMatrix: + case SPIRV::OpTypeImage: + case SPIRV::OpTypeSampler: + case SPIRV::OpTypeSampledImage: + case SPIRV::OpTypeArray: + case SPIRV::OpTypeRuntimeArray: + case SPIRV::OpTypeStruct: + case SPIRV::OpTypeOpaque: + case SPIRV::OpTypePointer: + case SPIRV::OpTypeFunction: + case SPIRV::OpTypeEvent: + case SPIRV::OpTypeDeviceEvent: + case SPIRV::OpTypeReserveId: + case SPIRV::OpTypeQueue: + case SPIRV::OpTypePipe: + case SPIRV::OpTypeForwardPointer: + case SPIRV::OpTypePipeStorage: + case SPIRV::OpTypeNamedBarrier: + case SPIRV::OpTypeAccelerationStructureNV: + case SPIRV::OpTypeCooperativeMatrixNV: + case SPIRV::OpTypeCooperativeMatrixKHR: + return true; + default: + return false; + } +} + bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI) { + // If there are no definitions, then assume there is some other + // side-effect that makes this instruction live. 
+ if (MI.getNumDefs() == 0) + return false; + for (const auto &MO : MI.all_defs()) { Register Reg = MO.getReg(); - if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg)) + if (Reg.isPhysical()) { + LLVM_DEBUG(dbgs() << "Not dead: def of physical register " << Reg); return false; + } + for (const auto &UseMI : MRI.use_nodbg_instructions(Reg)) { + if (UseMI.getOpcode() != SPIRV::OpName) { + LLVM_DEBUG(dbgs() << "Not dead: def " << MO << " has use in " << UseMI); + return false; + } + } } + if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() || - MI.isLifetimeMarker()) + MI.isLifetimeMarker()) { + LLVM_DEBUG( + dbgs() + << "Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n"); return false; - if (MI.isPHI()) + } + if (MI.isPHI()) { + LLVM_DEBUG(dbgs() << "Dead: Phi instruction with no uses.\n"); return true; + } + + // It is possible that the only side effect is that the instruction is + // referenced in the global registry. If that is the only side effect, the + // intrinsic is dead. + if (MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS || + MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) { + const auto &Intr = cast(MI); + if (!intrinsicHasSideEffects(Intr.getIntrinsicID())) { + LLVM_DEBUG(dbgs() << "Dead: Intrinsic with no real side effects.\n"); + return true; + } + } + if (MI.mayStore() || MI.isCall() || (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() || - MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo()) + MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo()) { + LLVM_DEBUG(dbgs() << "Not dead: instruction has side effects.\n"); return false; - return true; + } + + if (isPreISelGenericOpcode(MI.getOpcode())) { + // TODO: Is there a generic way to check if the opcode has side effects? 
+ LLVM_DEBUG(dbgs() << "Dead: Generic opcode with no uses.\n"); + return true; + } + + if (isOpcodeWithNoSideEffects(MI.getOpcode())) { + LLVM_DEBUG(dbgs() << "Dead: known opcode with no side effects\n"); + return true; + } + + return false; +} + +void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &MI) const { + // Delete the OpName that uses the result if there is one. + for (const auto &MO : MI.all_defs()) { + Register Reg = MO.getReg(); + if (Reg.isPhysical()) + continue; + SmallVector UselessOpNames; + for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) { + assert(UseMI.getOpcode() == SPIRV::OpName && + "There is still a use of the dead function."); + UselessOpNames.push_back(&UseMI); + } + for (MachineInstr *OpNameMI : UselessOpNames) { + GR.invalidateMachineInstr(OpNameMI); + OpNameMI->eraseFromParent(); + } + } +} + +void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &MI) const { + salvageDebugInfo(*MRI, MI); + GR.invalidateMachineInstr(&MI); + removeOpNamesForDeadMI(MI); + MI.eraseFromParent(); } bool SPIRVInstructionSelector::select(MachineInstr &I) { @@ -533,6 +719,13 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) { assert(I.getParent() && "Instruction should be in a basic block!"); assert(I.getParent()->getParent() && "Instruction should be in a function!"); + LLVM_DEBUG(dbgs() << "Checking if instruction is dead: " << I;); + if (isDead(I, *MRI)) { + LLVM_DEBUG(dbgs() << "Instruction is dead.\n"); + removeDeadInstruction(I); + return true; + } + Register Opcode = I.getOpcode(); // If it's not a GMIR instruction, we've selected it already. 
if (!isPreISelGenericOpcode(Opcode)) { @@ -584,9 +777,7 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) { // if the instruction has been already made dead by folding it away // erase it LLVM_DEBUG(dbgs() << "Instruction is folded and dead.\n"); - salvageDebugInfo(*MRI, I); - GR.invalidateMachineInstr(&I); - I.eraseFromParent(); + removeDeadInstruction(I); return true; } @@ -1435,50 +1626,79 @@ bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const { .constrainAllUses(TII, TRI, RBI); } -bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, - MachineInstr &I) const { +Register +SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &I) const { + MachineIRBuilder MIRBuilder(I); + assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); + + // TODO: check if we have such GV, add init, use buildGlobalVariable. + unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); + Function &CurFunction = GR.CurMF->getFunction(); + Type *LLVMArrTy = + ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num); + GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy, + true, GlobalValue::InternalLinkage, + Constant::getNullValue(LLVMArrTy)); + + Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); + Type *ArrTy = ArrayType::get(ValTy, Num); + SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( + ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant); + + SPIRVType *SpvArrTy = GR.getOrCreateSPIRVType( + ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false); + + unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); + Register Const = GR.getOrCreateConstIntArray(Val, Num, I, SpvArrTy, TII); + + Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); + auto MIBVar = + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) + .addDef(VarReg) + .addUse(GR.getSPIRVTypeID(VarTy)) + .addImm(SPIRV::StorageClass::UniformConstant) + .addUse(Const); + if 
(!MIBVar.constrainAllUses(TII, TRI, RBI)) + return Register(); + + GR.add(GV, MIBVar); + GR.addGlobalObject(GV, GR.CurMF, VarReg); + + buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); + return VarReg; +} + +bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &I, + Register SrcReg) const { MachineBasicBlock &BB = *I.getParent(); - Register SrcReg = I.getOperand(1).getReg(); - bool Result = true; - if (I.getOpcode() == TargetOpcode::G_MEMSET) { + Register DstReg = I.getOperand(0).getReg(); + SPIRVType *DstTy = GR.getSPIRVTypeForVReg(DstReg); + SPIRVType *SrcTy = GR.getSPIRVTypeForVReg(SrcReg); + if (GR.getPointeeType(DstTy) != GR.getPointeeType(SrcTy)) + report_fatal_error("OpCopyMemory requires operands to have the same type"); + uint64_t CopySize = getIConstVal(I.getOperand(2).getReg(), MRI); + SPIRVType *PointeeTy = GR.getPointeeType(DstTy); + const Type *LLVMPointeeTy = GR.getTypeForSPIRVType(PointeeTy); + if (!LLVMPointeeTy) + report_fatal_error( + "Unable to determine pointee type size for OpCopyMemory"); + const DataLayout &DL = I.getMF()->getFunction().getDataLayout(); + if (CopySize != DL.getTypeStoreSize(const_cast(LLVMPointeeTy))) + report_fatal_error( + "OpCopyMemory requires the size to match the pointee type size"); + auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemory)) + .addUse(DstReg) + .addUse(SrcReg); + if (I.getNumMemOperands()) { MachineIRBuilder MIRBuilder(I); - assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); - unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); - unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); - Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); - Type *ArrTy = ArrayType::get(ValTy, Num); - SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( - ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant); - - SPIRVType *SpvArrTy = GR.getOrCreateSPIRVType( - ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false); - Register Const = 
GR.getOrCreateConstIntArray(Val, Num, I, SpvArrTy, TII); - // TODO: check if we have such GV, add init, use buildGlobalVariable. - Function &CurFunction = GR.CurMF->getFunction(); - Type *LLVMArrTy = - ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num); - // Module takes ownership of the global var. - GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy, - true, GlobalValue::InternalLinkage, - Constant::getNullValue(LLVMArrTy)); - Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); - auto MIBVar = - BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) - .addDef(VarReg) - .addUse(GR.getSPIRVTypeID(VarTy)) - .addImm(SPIRV::StorageClass::UniformConstant) - .addUse(Const); - Result &= MIBVar.constrainAllUses(TII, TRI, RBI); - - GR.add(GV, MIBVar); - GR.addGlobalObject(GV, GR.CurMF, VarReg); - - buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); - SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( - ValTy, I, SPIRV::StorageClass::UniformConstant); - SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); - selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast); + addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR); } + return MIB.constrainAllUses(TII, TRI, RBI); +} + +bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &I, + Register SrcReg) const { + MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized)) .addUse(I.getOperand(0).getReg()) .addUse(SrcReg) @@ -1487,9 +1707,30 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, MachineIRBuilder MIRBuilder(I); addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR); } - Result &= MIB.constrainAllUses(TII, TRI, RBI); - if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) - Result &= BuildCOPY(ResVReg, MIB->getOperand(0).getReg(), I); + return MIB.constrainAllUses(TII, TRI, RBI); +} + +bool 
SPIRVInstructionSelector::selectMemOperation(Register ResVReg, + MachineInstr &I) const { + Register SrcReg = I.getOperand(1).getReg(); + bool Result = true; + if (I.getOpcode() == TargetOpcode::G_MEMSET) { + Register VarReg = getOrCreateMemSetGlobal(I); + if (!VarReg.isValid()) + return false; + Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); + SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( + ValTy, I, SPIRV::StorageClass::UniformConstant); + SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); + Result &= selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast); + } + if (STI.isLogicalSPIRV()) { + Result &= selectCopyMemory(I, SrcReg); + } else { + Result &= selectCopyMemorySized(I, SrcReg); + } + if (ResVReg.isValid() && ResVReg != I.getOperand(0).getReg()) + Result &= BuildCOPY(ResVReg, I.getOperand(0).getReg(), I); return Result; } @@ -3088,6 +3329,11 @@ bool SPIRVInstructionSelector::selectGEP(Register ResVReg, .addUse(GR.getSPIRVTypeID(ResType)) // Object to get a pointer to. .addUse(I.getOperand(3).getReg()); + assert(Opcode == SPIRV::OpPtrAccessChain || + Opcode == SPIRV::OpInBoundsPtrAccessChain || + (getImm(I.getOperand(4), MRI) && foldImm(I.getOperand(4), MRI) == 0) && + "Cannot translate GEP to OpAccessChain. First index must be 0."); + // Adding indices. 
const unsigned StartingIndex = (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain) diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp index 4ce871b6f5e5d..81c7596530ee2 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp @@ -104,9 +104,13 @@ class SPIRVLegalizePointerCast : public FunctionPass { Value *loadFirstValueFromAggregate(IRBuilder<> &B, Type *ElementType, Value *Source, LoadInst *BadLoad) { SmallVector Types = {BadLoad->getPointerOperandType(), - BadLoad->getPointerOperandType()}; - SmallVector Args{/* isInBounds= */ B.getInt1(false), Source, - B.getInt32(0), B.getInt32(0)}; + Source->getType()}; + SmallVector Args{/* isInBounds= */ B.getInt1(false), Source}; + + Type *AggregateType = GR->findDeducedElementType(Source); + assert(AggregateType && "Could not deduce aggregate type"); + buildGEPIndexChain(B, ElementType, AggregateType, Args); + auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); GR->buildAssignPtr(B, ElementType, GEP); @@ -201,34 +205,20 @@ class SPIRVLegalizePointerCast : public FunctionPass { auto *SAT = dyn_cast(FromTy); auto *SVT = dyn_cast(FromTy); - auto *SST = dyn_cast(FromTy); auto *DVT = dyn_cast(ToTy); B.SetInsertPoint(LI); - // Destination is the element type of Source, and source is an array -> - // Loading 1st element. + // Destination is the element type of some member of FromTy. For example, + // loading the 1st element of an array: // - float a = array[0]; - if (SAT && SAT->getElementType() == ToTy) - Output = loadFirstValueFromAggregate(B, SAT->getElementType(), - OriginalOperand, LI); - // Destination is the element type of Source, and source is a vector -> - // Vector to scalar. 
- // - float a = vector.x; - else if (!DVT && SVT && SVT->getElementType() == ToTy) { - Output = loadFirstValueFromAggregate(B, SVT->getElementType(), - OriginalOperand, LI); - } + if (isTypeFirstElementAggregate(ToTy, FromTy)) + Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI); // Destination is a smaller vector than source or different vector type. // - float3 v3 = vector4; // - float4 v2 = int4; else if (SVT && DVT) Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand); - // Destination is the scalar type stored at the start of an aggregate. - // - struct S { float m }; - // - float v = s.m; - else if (SST && SST->getTypeAtIndex(0u) == ToTy) - Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI); else if (SAT && DVT && SAT->getElementType() == DVT->getElementType()) Output = loadVectorFromArray(B, DVT, OriginalOperand); else @@ -334,7 +324,7 @@ class SPIRVLegalizePointerCast : public FunctionPass { Value *storeToFirstValueAggregate(IRBuilder<> &B, Value *Src, Value *Dst, Type *DstPointeeType, Align Alignment) { SmallVector Types = {Dst->getType(), Dst->getType()}; - SmallVector Args{/* isInBounds= */ B.getInt1(true), Dst}; + SmallVector Args{/* isInBounds= */ B.getInt1(true), Dst}; buildGEPIndexChain(B, Src->getType(), DstPointeeType, Args); auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); GR->buildAssignPtr(B, Src->getType(), GEP); diff --git a/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td b/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td index 1ef42b79f1a8e..e8b15960e9e92 100644 --- a/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td +++ b/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td @@ -15,7 +15,7 @@ let Namespace = "SPIRV" in { def p64 : PtrValueType ; class VTPtrVec - : VTVec, ptr.Value> { + : VTVec, ptr.LLVMName> { int isPointer = true; } diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp index 8f2fc01da476f..7fdb0fafa3719 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ 
b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -1042,6 +1042,75 @@ getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) { : VarPos; } +bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType, + uint64_t &TotalSize) { + // An array of N padded structs is represented as {[N-1 x <{T, pad}>], T}. + if (Ty->getStructNumElements() != 2) + return false; + + Type *FirstElement = Ty->getStructElementType(0); + Type *SecondElement = Ty->getStructElementType(1); + + if (!FirstElement->isArrayTy()) + return false; + + Type *ArrayElementType = FirstElement->getArrayElementType(); + if (!ArrayElementType->isStructTy() || + ArrayElementType->getStructNumElements() != 2) + return false; + + Type *T_in_struct = ArrayElementType->getStructElementType(0); + if (T_in_struct != SecondElement) + return false; + + auto *Padding_in_struct = + dyn_cast(ArrayElementType->getStructElementType(1)); + if (!Padding_in_struct || Padding_in_struct->getName() != "spirv.Padding") + return false; + + const uint64_t ArraySize = FirstElement->getArrayNumElements(); + TotalSize = ArraySize + 1; + OriginalElementType = ArrayElementType; + return true; +} + +Type *reconstitutePeeledArrayType(Type *Ty) { + if (!Ty->isStructTy()) + return Ty; + + auto *STy = cast(Ty); + Type *OriginalElementType = nullptr; + uint64_t TotalSize = 0; + if (matchPeeledArrayPattern(STy, OriginalElementType, TotalSize)) { + Type *ResultTy = ArrayType::get( + reconstitutePeeledArrayType(OriginalElementType), TotalSize); + return ResultTy; + } + + SmallVector NewElementTypes; + bool Changed = false; + for (Type *ElementTy : STy->elements()) { + Type *NewElementTy = reconstitutePeeledArrayType(ElementTy); + if (NewElementTy != ElementTy) + Changed = true; + NewElementTypes.push_back(NewElementTy); + } + + if (!Changed) + return Ty; + + Type *ResultTy; + if (STy->isLiteral()) + ResultTy = + StructType::get(STy->getContext(), NewElementTypes, STy->isPacked()); + else { + auto *NewTy = 
StructType::create(STy->getContext(), STy->getName()); + NewTy->setBody(NewElementTypes, STy->isPacked()); + ResultTy = NewTy; + } + return ResultTy; +} + std::optional getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) { if (GV.hasLocalLinkage() || GV.hasHiddenVisibility()) diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h index 99d9d403ea70c..45e211a1e5d2a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.h +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h @@ -321,6 +321,21 @@ Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx); // Returns true if the function was changed. bool sortBlocks(Function &F); +// Check for peeled array structs and recursively reconstitute them. In HLSL +// CBuffers, arrays may have padding between the elements, but not after the +// last element. To represent this in LLVM IR an array [N x T] will be +// represented as {[N-1 x {T, spirv.Padding}], T}. The function +// matchPeeledArrayPattern recognizes this pattern retrieving the type {T, +// spirv.Padding}, and the size N. +bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType, + uint64_t &TotalSize); + +// This function will turn the type {[N-1 x {T, spirv.Padding}], T} back into +// [N x {T, spirv.Padding}]. So it can be translated into SPIR-V. The offset +// decorations will be such that there will be no padding after the array when +// relevant. 
+Type *reconstitutePeeledArrayType(Type *Ty); + inline bool hasInitializer(const GlobalVariable *GV) { return GV->hasInitializer() && !isa(GV->getInitializer()); } diff --git a/llvm/lib/Target/Sparc/Sparc.td b/llvm/lib/Target/Sparc/Sparc.td index 38b0508885069..ecf82fab5cc41 100644 --- a/llvm/lib/Target/Sparc/Sparc.td +++ b/llvm/lib/Target/Sparc/Sparc.td @@ -126,6 +126,8 @@ include "SparcCallingConv.td" include "SparcSchedule.td" include "SparcInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def SparcInstrInfo : InstrInfo; def SparcAsmParser : AsmParser { diff --git a/llvm/lib/Target/Sparc/SparcCallingConv.td b/llvm/lib/Target/Sparc/SparcCallingConv.td index 8afd0a7fc09ad..d9c50483a029c 100644 --- a/llvm/lib/Target/Sparc/SparcCallingConv.td +++ b/llvm/lib/Target/Sparc/SparcCallingConv.td @@ -17,6 +17,9 @@ def CC_Sparc32 : CallingConv<[ // Custom assign SRet to [sp+64]. CCIfSRet>, + // f128 arguments are passed indirectly, using i32 pointers. + // FIXME GCC in soft-float mode passes f128 as if 2xi64 values. + CCIfType<[f128], CCPassIndirect>, // i32 f32 arguments get passed in integer registers if there is space. CCIfType<[i32, f32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>, // f64 arguments are split and passed through registers or through stack. @@ -24,20 +27,20 @@ def CC_Sparc32 : CallingConv<[ // As are v2i32 arguments (this would be the default behavior for // v2i32 if it wasn't allocated to the IntPair register-class) CCIfType<[v2i32], CCCustom<"CC_Sparc_Assign_Split_64">>, - - // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> ]>; + def RetCC_Sparc32 : CallingConv<[ CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>, CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>, CCIfType<[f64], CCAssignToReg<[D0, D1]>>, + // FIXME GCC in soft-float mode passes f128 as if 2xi64 values. 
+ CCIfType<[f128], CCIfInReg>>>, CCIfType<[v2i32], CCCustom<"CC_Sparc_Assign_Ret_Split_64">> ]>; - //===----------------------------------------------------------------------===// // SPARC v9 64-bit. //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp index a4a9eafd52ffe..de8768a7cdbca 100644 --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -440,6 +440,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); SparcMachineFunctionInfo *FuncInfo = MF.getInfo(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); // Assign locations to all of the incoming arguments. SmallVector ArgLocs; @@ -453,6 +454,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( unsigned InIdx = 0; for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) { CCValAssign &VA = ArgLocs[i]; + EVT LocVT = VA.getLocVT(); if (Ins[InIdx].Flags.isSRet()) { if (InIdx != 0) @@ -466,6 +468,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( continue; } + SDValue Arg; if (VA.isRegLoc()) { if (VA.needsCustom()) { assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32); @@ -500,76 +503,85 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( } Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg); - SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); - if (VA.getLocVT() == MVT::f32) - Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg); - else if (VA.getLocVT() != MVT::i32) { - Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg, - DAG.getValueType(VA.getLocVT())); - Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg); + Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); + if (VA.getLocInfo() != CCValAssign::Indirect) { + if 
(VA.getLocVT() == MVT::f32) + Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg); + else if (VA.getLocVT() != MVT::i32) { + Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg, + DAG.getValueType(VA.getLocVT())); + Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg); + } + InVals.push_back(Arg); + continue; } - InVals.push_back(Arg); - continue; - } + } else { + assert(VA.isMemLoc()); - assert(VA.isMemLoc()); + unsigned Offset = VA.getLocMemOffset() + StackOffset; - unsigned Offset = VA.getLocMemOffset()+StackOffset; - auto PtrVT = getPointerTy(DAG.getDataLayout()); + if (VA.needsCustom()) { + assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32); + // If it is double-word aligned, just load. + if (Offset % 8 == 0) { + int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true); + SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); + SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, + MachinePointerInfo()); + InVals.push_back(Load); + continue; + } - if (VA.needsCustom()) { - assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32); - // If it is double-word aligned, just load. 
- if (Offset % 8 == 0) { - int FI = MF.getFrameInfo().CreateFixedObject(8, - Offset, - true); + int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true); SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); - SDValue Load = - DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo()); - InVals.push_back(Load); - continue; - } + SDValue HiVal = + DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo()); + int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset + 4, true); + SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT); - int FI = MF.getFrameInfo().CreateFixedObject(4, - Offset, - true); - SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); - SDValue HiVal = - DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo()); - int FI2 = MF.getFrameInfo().CreateFixedObject(4, - Offset+4, - true); - SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT); + SDValue LoVal = + DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo()); - SDValue LoVal = - DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo()); + if (IsLittleEndian) + std::swap(LoVal, HiVal); - if (IsLittleEndian) - std::swap(LoVal, HiVal); + SDValue WholeValue = + DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); + WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue); + InVals.push_back(WholeValue); + continue; + } - SDValue WholeValue = - DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); - WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue); - InVals.push_back(WholeValue); - continue; + int FI = MF.getFrameInfo().CreateFixedObject(LocVT.getSizeInBits() / 8, + Offset, true); + SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); + SDValue Load = DAG.getLoad(LocVT, dl, Chain, FIPtr, + MachinePointerInfo::getFixedStack(MF, FI)); + if (VA.getLocInfo() != CCValAssign::Indirect) { + InVals.push_back(Load); + continue; + } + Arg = Load; } - int FI = MF.getFrameInfo().CreateFixedObject(4, - Offset, - true); - SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT); - 
SDValue Load ; - if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) { - Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo()); - } else if (VA.getValVT() == MVT::f128) { - report_fatal_error("SPARCv8 does not handle f128 in calls; " - "pass indirectly"); - } else { - // We shouldn't see any other value types here. - llvm_unreachable("Unexpected ValVT encountered in frame lowering."); + assert(VA.getLocInfo() == CCValAssign::Indirect); + + SDValue ArgValue = + DAG.getLoad(VA.getValVT(), dl, Chain, Arg, MachinePointerInfo()); + InVals.push_back(ArgValue); + + unsigned ArgIndex = Ins[InIdx].OrigArgIndex; + assert(Ins[InIdx].PartOffset == 0); + while (i + 1 != e && Ins[InIdx + 1].OrigArgIndex == ArgIndex) { + CCValAssign &PartVA = ArgLocs[i + 1]; + unsigned PartOffset = Ins[InIdx + 1].PartOffset; + SDValue Address = DAG.getMemBasePlusOffset( + ArgValue, TypeSize::getFixed(PartOffset), dl); + InVals.push_back(DAG.getLoad(PartVA.getValVT(), dl, Chain, Address, + MachinePointerInfo())); + ++i; + ++InIdx; } - InVals.push_back(Load); } if (MF.getFunction().hasStructRetAttr()) { @@ -836,6 +848,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, CallingConv::ID CallConv = CLI.CallConv; bool isVarArg = CLI.IsVarArg; MachineFunction &MF = DAG.getMachineFunction(); + LLVMContext &Ctx = *DAG.getContext(); + EVT PtrVT = getPointerTy(MF.getDataLayout()); // Analyze operands of the call, assigning locations to each operand. SmallVector ArgLocs; @@ -914,7 +928,9 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, // Promote the value if needed. 
switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); - case CCValAssign::Full: break; + case CCValAssign::Full: + case CCValAssign::Indirect: + break; case CCValAssign::SExt: Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); break; @@ -1013,6 +1029,49 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, continue; } + if (VA.getLocInfo() == CCValAssign::Indirect) { + // Store the argument in a stack slot and pass its address. + unsigned ArgIndex = Outs[realArgIdx].OrigArgIndex; + assert(Outs[realArgIdx].PartOffset == 0); + + EVT SlotVT; + if (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) { + Type *OrigArgType = CLI.Args[ArgIndex].Ty; + EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType); + MVT PartVT = + getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT); + unsigned N = + getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT); + SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N); + } else { + SlotVT = Outs[realArgIdx].VT; + } + + SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT); + int FI = cast(SpillSlot)->getIndex(); + MemOpChains.push_back( + DAG.getStore(Chain, dl, Arg, SpillSlot, + MachinePointerInfo::getFixedStack(MF, FI))); + // If the original argument was split (e.g. f128), we need + // to store all parts of it here (and pass just one address). 
+ while (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) { + SDValue PartValue = OutVals[realArgIdx + 1]; + unsigned PartOffset = Outs[realArgIdx + 1].PartOffset; + SDValue Address = DAG.getMemBasePlusOffset( + DAG.getFrameIndex(FI, PtrVT), TypeSize::getFixed(PartOffset), dl); + MemOpChains.push_back( + DAG.getStore(Chain, dl, PartValue, Address, + MachinePointerInfo::getFixedStack(MF, FI))); + assert((PartOffset + PartValue.getValueType().getStoreSize() <= + SlotVT.getStoreSize()) && + "Not enough space for argument part!"); + ++i; + ++realArgIdx; + } + + Arg = SpillSlot; + } + // Arguments that can be passed on register must be kept at // RegsToPass vector if (VA.isRegLoc()) { diff --git a/llvm/lib/Target/SystemZ/SystemZ.td b/llvm/lib/Target/SystemZ/SystemZ.td index ec110645c62dd..95f039d6328f3 100644 --- a/llvm/lib/Target/SystemZ/SystemZ.td +++ b/llvm/lib/Target/SystemZ/SystemZ.td @@ -57,6 +57,9 @@ include "SystemZInstrHFP.td" include "SystemZInstrDFP.td" include "SystemZInstrSystem.td" + +defm : RemapAllTargetPseudoPointerOperands; + def SystemZInstrInfo : InstrInfo { let guessInstructionProperties = 0; } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp index e31d7c6a86476..f061272d3fad4 100644 --- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp +++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp @@ -1270,7 +1270,7 @@ void SystemZAsmPrinter::emitFunctionBodyEnd() { static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, bool StackProtector, bool FPRMask, bool VRMask, - bool EHBlock, bool HasName) { + bool EHBlock, bool HasArgAreaLength, bool HasName) { enum class PPA1Flag1 : uint8_t { DSA64Bit = (0x80 >> 0), VarArg = (0x80 >> 7), @@ -1282,8 +1282,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, LLVM_MARK_AS_BITMASK_ENUM(ExternalProcedure) }; enum class PPA1Flag3 : uint8_t { 
+ HasArgAreaLength = (0x80 >> 1), FPRMask = (0x80 >> 2), - LLVM_MARK_AS_BITMASK_ENUM(FPRMask) + LLVM_MARK_AS_BITMASK_ENUM(HasArgAreaLength) }; enum class PPA1Flag4 : uint8_t { EPMOffsetPresent = (0x80 >> 0), @@ -1307,6 +1308,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, if (StackProtector) Flags2 |= PPA1Flag2::STACKPROTECTOR; + if (HasArgAreaLength) + Flags3 |= PPA1Flag3::HasArgAreaLength; // Add emit ArgAreaLength flag. + // SavedGPRMask, SavedFPRMask, and SavedVRMask are precomputed in. if (FPRMask) Flags3 |= PPA1Flag3::FPRMask; // Add emit FPR mask flag. @@ -1339,6 +1343,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, OutStreamer->emitInt8(static_cast(Flags2)); // Flags 2. OutStreamer->AddComment("PPA1 Flags 3"); + if ((Flags3 & PPA1Flag3::HasArgAreaLength) == PPA1Flag3::HasArgAreaLength) + OutStreamer->AddComment( + " Bit 1: 1 = Argument Area Length is in optional area"); if ((Flags3 & PPA1Flag3::FPRMask) == PPA1Flag3::FPRMask) OutStreamer->AddComment(" Bit 2: 1 = FP Reg Mask is in optional area"); OutStreamer->emitInt8( @@ -1477,12 +1484,26 @@ void SystemZAsmPrinter::emitPPA1(MCSymbol *FnEndSym) { bool NeedEmitEHBlock = !MF->getLandingPads().empty(); + // Optional Argument Area Length. + // Note: This represents the length of the argument area that we reserve + // in our stack for setting up arguments for calls to other + // routines. If this optional field is not set, LE will reserve + // 128 bytes for the argument area. This optional field is + // created if greater than 128 bytes is required - to guarantee + // the required space is reserved on stack extension in the new + // extension. This optional field is also created if the + // routine has alloca(). This may reduce stack space + // if alloca() call causes a stack extension. 
+ bool HasArgAreaLength = + (AllocaReg != 0) || (MFFrame.getMaxCallFrameSize() > 128); + bool HasName = MF->getFunction().hasName() && MF->getFunction().getName().size() > 0; emitPPA1Flags(OutStreamer, MF->getFunction().isVarArg(), MFFrame.hasStackProtectorIndex(), SavedFPRMask != 0, - TargetHasVector && SavedVRMask != 0, NeedEmitEHBlock, HasName); + TargetHasVector && SavedVRMask != 0, NeedEmitEHBlock, + HasArgAreaLength, HasName); OutStreamer->AddComment("Length/4 of Parms"); OutStreamer->emitInt16( @@ -1490,6 +1511,11 @@ void SystemZAsmPrinter::emitPPA1(MCSymbol *FnEndSym) { OutStreamer->AddComment("Length of Code"); OutStreamer->emitAbsoluteSymbolDiff(FnEndSym, CurrentFnEPMarkerSym, 4); + if (HasArgAreaLength) { + OutStreamer->AddComment("Argument Area Length"); + OutStreamer->emitInt32(MFFrame.getMaxCallFrameSize()); + } + // Emit saved FPR mask and offset to FPR save area (0x20 of flags 3). if (SavedFPRMask) { OutStreamer->AddComment("FPR mask"); diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp index eb1ce4a2101d7..db4f9a15d6497 100644 --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -2360,3 +2360,19 @@ SystemZInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { return std::nullopt; } + +std::pair +SystemZInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { + return std::make_pair(TF, 0u); +} + +ArrayRef> +SystemZInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { + using namespace SystemZII; + + static const std::pair TargetFlags[] = { + {MO_ADA_DATA_SYMBOL_ADDR, "systemz-ada-datasymboladdr"}, + {MO_ADA_INDIRECT_FUNC_DESC, "systemz-ada-indirectfuncdesc"}, + {MO_ADA_DIRECT_FUNC_DESC, "systemz-ada-directfuncdesc"}}; + return ArrayRef(TargetFlags); +} diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h index 4aecdd7498018..9fadf7bfb6d2b 100644 --- 
a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h @@ -71,18 +71,13 @@ enum { MO_GOT = (1 << 0), // @INDNTPOFF - MO_INDNTPOFF = (2 << 0) -}; + MO_INDNTPOFF = (2 << 0), -// z/OS XPLink specific: classifies the types of -// accesses to the ADA (Associated Data Area). -// These enums contains values that overlap with the above MO_ enums, -// but that's fine since the above enums are used with ELF, -// while these values are used with z/OS. -enum { - MO_ADA_DATA_SYMBOL_ADDR = 1, - MO_ADA_INDIRECT_FUNC_DESC, - MO_ADA_DIRECT_FUNC_DESC, + // z/OS XPLink specific: classifies the types of + // accesses to the ADA (Associated Data Area). + MO_ADA_DATA_SYMBOL_ADDR = (1 << 2), + MO_ADA_INDIRECT_FUNC_DESC = (2 << 2), + MO_ADA_DIRECT_FUNC_DESC = (3 << 2), }; // Classifies a branch. @@ -391,6 +386,12 @@ class SystemZInstrInfo : public SystemZGenInstrInfo { std::optional isCopyInstrImpl(const MachineInstr &MI) const override; + + std::pair + decomposeMachineOperandsTargetFlags(unsigned TF) const override; + + ArrayRef> + getSerializableDirectMachineOperandTargetFlags() const override; }; } // end namespace llvm diff --git a/llvm/lib/Target/VE/VE.td b/llvm/lib/Target/VE/VE.td index bb076bd9f6d41..aedce0f4ebc8f 100644 --- a/llvm/lib/Target/VE/VE.td +++ b/llvm/lib/Target/VE/VE.td @@ -30,6 +30,7 @@ include "VERegisterInfo.td" include "VECallingConv.td" include "VEInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; def VEInstrInfo : InstrInfo {} def VEAsmParser : AsmParser { diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.td b/llvm/lib/Target/WebAssembly/WebAssembly.td index 089be5f1dc70e..67015ffcfc760 100644 --- a/llvm/lib/Target/WebAssembly/WebAssembly.td +++ b/llvm/lib/Target/WebAssembly/WebAssembly.td @@ -108,6 +108,14 @@ include "WebAssemblyRegisterInfo.td" include "WebAssemblyInstrInfo.td" +def WASM64 : HwMode<[HasAddr64]>; + +def wasm_ptr_rc : RegClassByHwMode< + [DefaultMode, WASM64], + [I32, I64]>; + +defm : 
RemapAllTargetPseudoPointerOperands; + def WebAssemblyInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp index 1ef10928c05d8..abbb0c2466e7d 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp @@ -65,7 +65,7 @@ void X86WinCOFFStreamer::emitCVFPOData(const MCSymbol *ProcSym, SMLoc Loc) { } void X86WinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td index 27ec052cfda40..8f29a64d58194 100644 --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -795,6 +795,8 @@ include "X86Schedule.td" include "X86InstrInfo.td" include "X86SchedPredicates.td" +defm : RemapAllTargetPseudoPointerOperands; + def X86InstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp index e3c44c048f7bf..6a18086cae29f 100644 --- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp +++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp @@ -608,40 +608,40 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB, Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDT1); break; case X86::PTCVTROWD2PSrreV: - Opc = X86::TCVTROWD2PSrre; + Opc = X86::TCVTROWD2PSrte; break; case X86::PTCVTROWD2PSrriV: - Opc = X86::TCVTROWD2PSrri; + Opc = X86::TCVTROWD2PSrti; break; case X86::PTCVTROWPS2BF16HrreV: - Opc = X86::TCVTROWPS2BF16Hrre; + Opc = X86::TCVTROWPS2BF16Hrte; break; case X86::PTCVTROWPS2BF16HrriV: - Opc = X86::TCVTROWPS2BF16Hrri; + Opc = X86::TCVTROWPS2BF16Hrti; break; case X86::PTCVTROWPS2BF16LrreV: - Opc = X86::TCVTROWPS2BF16Lrre; + Opc = X86::TCVTROWPS2BF16Lrte; break; case 
X86::PTCVTROWPS2BF16LrriV: - Opc = X86::TCVTROWPS2BF16Lrri; + Opc = X86::TCVTROWPS2BF16Lrti; break; case X86::PTCVTROWPS2PHHrreV: - Opc = X86::TCVTROWPS2PHHrre; + Opc = X86::TCVTROWPS2PHHrte; break; case X86::PTCVTROWPS2PHHrriV: - Opc = X86::TCVTROWPS2PHHrri; + Opc = X86::TCVTROWPS2PHHrti; break; case X86::PTCVTROWPS2PHLrreV: - Opc = X86::TCVTROWPS2PHLrre; + Opc = X86::TCVTROWPS2PHLrte; break; case X86::PTCVTROWPS2PHLrriV: - Opc = X86::TCVTROWPS2PHLrri; + Opc = X86::TCVTROWPS2PHLrti; break; case X86::PTILEMOVROWrreV: - Opc = X86::TILEMOVROWrre; + Opc = X86::TILEMOVROWrte; break; case X86::PTILEMOVROWrriV: - Opc = X86::TILEMOVROWrri; + Opc = X86::TILEMOVROWrti; break; default: llvm_unreachable("Unexpected Opcode"); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index fe9a598fb5611..1b0bf6823e390 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2668,6 +2668,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, ISD::AVGFLOORU, ISD::BITREVERSE, ISD::ADD, + ISD::SADDSAT, + ISD::SSUBSAT, ISD::FADD, ISD::FSUB, ISD::FNEG, @@ -8151,6 +8153,8 @@ static SDValue LowerBUILD_VECTORvXi1(SDValue Op, const SDLoc &dl, case X86ISD::FHSUB: case X86ISD::HADD: case X86ISD::HSUB: + case X86ISD::HADDS: + case X86ISD::HSUBS: return true; } return false; @@ -35121,6 +35125,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(BLENDV) NODE_NAME_CASE(HADD) NODE_NAME_CASE(HSUB) + NODE_NAME_CASE(HADDS) + NODE_NAME_CASE(HSUBS) NODE_NAME_CASE(FHADD) NODE_NAME_CASE(FHSUB) NODE_NAME_CASE(CONFLICT) @@ -38358,22 +38364,22 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, default: llvm_unreachable("Unexpected instruction!"); case X86::PTCVTROWD2PSrri: - Opc = X86::TCVTROWD2PSrri; + Opc = X86::TCVTROWD2PSrti; break; case X86::PTCVTROWPS2BF16Hrri: - Opc = X86::TCVTROWPS2BF16Hrri; + Opc = X86::TCVTROWPS2BF16Hrti; break; case 
X86::PTCVTROWPS2PHHrri: - Opc = X86::TCVTROWPS2PHHrri; + Opc = X86::TCVTROWPS2PHHrti; break; case X86::PTCVTROWPS2BF16Lrri: - Opc = X86::TCVTROWPS2BF16Lrri; + Opc = X86::TCVTROWPS2BF16Lrti; break; case X86::PTCVTROWPS2PHLrri: - Opc = X86::TCVTROWPS2PHLrri; + Opc = X86::TCVTROWPS2PHLrti; break; case X86::PTILEMOVROWrri: - Opc = X86::TILEMOVROWrri; + Opc = X86::TILEMOVROWrti; break; } MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc)); @@ -38396,22 +38402,22 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, default: llvm_unreachable("Unexpected instruction!"); case X86::PTCVTROWD2PSrre: - Opc = X86::TCVTROWD2PSrre; + Opc = X86::TCVTROWD2PSrte; break; case X86::PTCVTROWPS2BF16Hrre: - Opc = X86::TCVTROWPS2BF16Hrre; + Opc = X86::TCVTROWPS2BF16Hrte; break; case X86::PTCVTROWPS2BF16Lrre: - Opc = X86::TCVTROWPS2BF16Lrre; + Opc = X86::TCVTROWPS2BF16Lrte; break; case X86::PTCVTROWPS2PHHrre: - Opc = X86::TCVTROWPS2PHHrre; + Opc = X86::TCVTROWPS2PHHrte; break; case X86::PTCVTROWPS2PHLrre: - Opc = X86::TCVTROWPS2PHLrre; + Opc = X86::TCVTROWPS2PHLrte; break; case X86::PTILEMOVROWrre: - Opc = X86::TILEMOVROWrre; + Opc = X86::TILEMOVROWrte; break; } MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc)); @@ -40897,8 +40903,9 @@ static SDValue canonicalizeShuffleMaskWithHorizOp( })) return SDValue(); - bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD || - Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB); + bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::FHSUB || + Opcode0 == X86ISD::HADD || Opcode0 == X86ISD::HSUB || + Opcode0 == X86ISD::HADDS || Opcode0 == X86ISD::HSUBS); bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS); if (!isHoriz && !isPack) return SDValue(); @@ -54231,7 +54238,9 @@ static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) { EVT VT = N->getValueType(0); unsigned Opcode = N->getOpcode(); - bool IsAdd = (Opcode == ISD::FADD) 
|| (Opcode == ISD::ADD); + bool IsAdd = + (Opcode == ISD::FADD) || (Opcode == ISD::ADD) || (Opcode == ISD::SADDSAT); + bool IsSat = (Opcode == ISD::SADDSAT) || (Opcode == ISD::SSUBSAT); SmallVector PostShuffleMask; auto MergableHorizOp = [N](unsigned HorizOpcode) { @@ -54261,11 +54270,17 @@ static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG, break; case ISD::ADD: case ISD::SUB: - if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 || - VT == MVT::v16i16 || VT == MVT::v8i32)) { + case ISD::SADDSAT: + case ISD::SSUBSAT: + if (!Subtarget.hasSSSE3()) + break; + if (VT == MVT::v8i16 || VT == MVT::v16i16 || + (!IsSat && (VT == MVT::v4i32 || VT == MVT::v8i32))) { + SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); - auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB; + auto HorizOpcode = IsSat ? (IsAdd ? X86ISD::HADDS : X86ISD::HSUBS) + : (IsAdd ? X86ISD::HADD : X86ISD::HSUB); if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd, PostShuffleMask, MergableHorizOp(HorizOpcode))) { auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL, @@ -59308,7 +59323,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, case X86ISD::ANDNP: // TODO: AVX512 targets should only use CombineSubOperand like AVX1/2. if (!IsSplat && (VT.is256BitVector() || - (VT.is512BitVector() && Subtarget.useAVX512Regs()))) { + (VT.is512BitVector() && Subtarget.useAVX512Regs()) || + (EltSizeInBits == 1 && TLI.isTypeLegal(VT)))) { // Don't concatenate root AVX1 NOT patterns. // TODO: Allow NOT folding if Concat0 succeeds. 
if (Opcode == ISD::XOR && Depth == 0 && !Subtarget.hasInt256() && @@ -59318,7 +59334,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, break; SDValue Concat0 = CombineSubOperand(VT, Ops, 0); SDValue Concat1 = CombineSubOperand(VT, Ops, 1); - if (Concat0 || Concat1 || Subtarget.useAVX512Regs()) + if (Concat0 || Concat1 || + (EltSizeInBits != 1 && Subtarget.useAVX512Regs())) return DAG.getNode(Opcode, DL, VT, Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0), Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1)); @@ -59712,6 +59729,14 @@ static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG, } } + // Attempt to merge logic ops if the type is legal. + if (TLI.isTypeLegal(VT) && all_of(Ops, [](SDValue Op) { + return ISD::isBitwiseLogicOp(Op.getOpcode()); + })) + if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, + DAG, Subtarget)) + return R; + // Don't do anything else for i1 vectors. return SDValue(); } @@ -61052,6 +61077,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget); case X86ISD::ADD: case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI, Subtarget); + case ISD::SADDSAT: + case ISD::SSUBSAT: return combineToHorizontalAddSub(N, DAG, Subtarget); case X86ISD::CLOAD: case X86ISD::CSTORE: return combineX86CloadCstore(N, DAG); case X86ISD::SBB: return combineSBB(N, DAG); diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index b7151f65942b4..c5085299716ed 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -270,6 +270,10 @@ namespace llvm { HADD, HSUB, + /// Integer horizontal saturating add/sub. + HADDS, + HSUBS, + /// Floating point horizontal add/sub. 
FHADD, FHSUB, diff --git a/llvm/lib/Target/X86/X86InstrAMX.td b/llvm/lib/Target/X86/X86InstrAMX.td index 522782abd710f..6b8b8f720ddd7 100644 --- a/llvm/lib/Target/X86/X86InstrAMX.td +++ b/llvm/lib/Target/X86/X86InstrAMX.td @@ -370,11 +370,11 @@ let Predicates = [HasAMXMOVRS, In64BitMode], SchedRW = [WriteSystem] in { multiclass m_tcvtrowd2ps { let Predicates = [HasAMXAVX512, HasAVX10_2, In64BitMode] in { let SchedRW = [WriteSystem] in { - def rri : Ii8<0x7, MRMSrcReg, (outs VR512:$dst), + def rti : Ii8<0x7, MRMSrcReg, (outs VR512:$dst), (ins TILE:$src1, i32u8imm:$src2), "tcvtrowd2ps\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, TA,XS, EVEX, EVEX_V512; - def rre : I<0x4A, MRMSrcReg4VOp3, (outs VR512:$dst), + def rte : I<0x4A, MRMSrcReg4VOp3, (outs VR512:$dst), (ins TILE:$src1, GR32:$src2), "tcvtrowd2ps\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, T8,XS, EVEX, VVVV, EVEX_V512; @@ -450,12 +450,12 @@ multiclass AMXAVX512_BASE Opcode1, bits<8> Opcode2, string Opstr, Prefix P1, Prefix P2> { let Predicates = [HasAMXAVX512, HasAVX10_2, In64BitMode], SchedRW = [WriteSystem] in { let OpPrefix = P1 in - def rre : I, EVEX, VVVV, EVEX_V512, T8; let OpPrefix = P2 in - def rri : Ii8, EVEX, EVEX_V512, TA; @@ -475,22 +475,22 @@ defm TCVTROWPS2PHL : AMXAVX512_BASE<0x6d, 0x77, "tcvtrowps2phl", PD, XD>; defm TCVTROWPS2BF16H : AMXAVX512_BASE<0x6d, 0x07, "tcvtrowps2bf16h", XD, XD>; defm TCVTROWPS2BF16L : AMXAVX512_BASE<0x6d, 0x77, "tcvtrowps2bf16l", XS, XS>; -multiclass m_tilemovrow { +multiclass AMXAVX512_TILEMOVE Opcode1, bits<8> Opcode2, string Opstr> { let Predicates = [HasAMXAVX512, HasAVX10_2, In64BitMode] in { let SchedRW = [WriteSystem] in { - def rri : Ii8<0x7, MRMSrcReg, (outs VR512:$dst), + def rti : Ii8, TA,PD, EVEX, EVEX_V512; - def rre : I<0x4A, MRMSrcReg4VOp3, (outs VR512:$dst), + !strconcat(Opstr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>, TA, PD, EVEX, EVEX_V512; + def rte : I, T8,PD, EVEX, VVVV, EVEX_V512; + !strconcat(Opstr, "\t{$src2, $src1, $dst|$dst, 
$src1, $src2}"), + []>, T8, PD, EVEX, VVVV, EVEX_V512; } } // HasAMXAVX512, HasAVX10_2, In64BitMode } -defm TILEMOVROW : m_tilemovrow; +defm TILEMOVROW : AMXAVX512_TILEMOVE<0x07, 0x4A, "tilemovrow">; let Predicates = [HasAMXAVX512, HasAVX10_2, In64BitMode] in { let SchedRW = [WriteSystem] in { diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td index 5321ecf0c1b2c..0803a4946b379 100644 --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -71,6 +71,8 @@ def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>; def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>; def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>; def X86hsub : SDNode<"X86ISD::HSUB", SDTIntBinOp>; +def X86hadds : SDNode<"X86ISD::HADDS", SDTIntBinOp>; +def X86hsubs : SDNode<"X86ISD::HSUBS", SDTIntBinOp>; def X86comi : SDNode<"X86ISD::COMI", SDTX86FCmp>; def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86FCmp>; def X86comi512 : SDNode<"X86ISD::COMX", SDTX86FCmp>; diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td index 806b02b9f9359..e4aaa1e1b594a 100644 --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -4864,12 +4864,12 @@ let isCommutable = 0 in { defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", int_x86_ssse3_psign_d_128, SchedWriteVecALU.XMM, load, 0>, VEX, VVVV, WIG; - defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", - int_x86_ssse3_phadd_sw_128, - SchedWritePHAdd.XMM, load, 0>, VEX, VVVV, WIG; - defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", - int_x86_ssse3_phsub_sw_128, - SchedWritePHAdd.XMM, load, 0>, VEX, VVVV, WIG; + defm VPHADDSW : SS3I_binop_rm<0x03, "vphaddsw", X86hadds, v8i16, v8i16, VR128, + load, i128mem, + SchedWritePHAdd.XMM, 0>, VEX, VVVV, WIG; + defm VPHSUBSW : SS3I_binop_rm<0x07, "vphsubsw", X86hsubs, v8i16, v8i16, VR128, + load, i128mem, + SchedWritePHAdd.XMM, 0>, VEX, VVVV, WIG; } } @@ -4907,12 +4907,12 @@ let 
isCommutable = 0 in { SchedWriteVecALU.YMM>, VEX, VVVV, VEX_L, WIG; defm VPSIGND : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d, SchedWriteVecALU.YMM>, VEX, VVVV, VEX_L, WIG; - defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw", - int_x86_avx2_phadd_sw, - SchedWritePHAdd.YMM>, VEX, VVVV, VEX_L, WIG; - defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw", - int_x86_avx2_phsub_sw, - SchedWritePHAdd.YMM>, VEX, VVVV, VEX_L, WIG; + defm VPHADDSWY : SS3I_binop_rm<0x03, "vphaddsw", X86hadds, v16i16, v16i16, + VR256, load, i256mem, + SchedWritePHAdd.YMM, 0>, VEX, VVVV, VEX_L, WIG; + defm VPHSUBSWY : SS3I_binop_rm<0x07, "vphsubsw", X86hsubs, v16i16, v16i16, + VR256, load, i256mem, + SchedWritePHAdd.YMM, 0>, VEX, VVVV, VEX_L, WIG; } } @@ -4935,12 +4935,10 @@ let isCommutable = 0 in { SchedWriteVecALU.XMM, memop>; defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, v16i8, VR128, memop, i128mem, SchedWriteVarShuffle.XMM>; - defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", - int_x86_ssse3_phadd_sw_128, - SchedWritePHAdd.XMM, memop>; - defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", - int_x86_ssse3_phsub_sw_128, - SchedWritePHAdd.XMM, memop>; + defm PHADDSW : SS3I_binop_rm<0x03, "phaddsw", X86hadds, v8i16, v8i16, VR128, + memop, i128mem, SchedWritePHAdd.XMM>; + defm PHSUBSW : SS3I_binop_rm<0x07, "phsubsw", X86hsubs, v8i16, v8i16, VR128, + memop, i128mem, SchedWritePHAdd.XMM>; defm PMADDUBSW : SS3I_binop_rm<0x04, "pmaddubsw", X86vpmaddubsw, v8i16, v16i8, VR128, memop, i128mem, SchedWriteVecIMul.XMM>; diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h index 0f725a8eb338b..99665b5872fe2 100644 --- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h +++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h @@ -724,8 +724,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86_INTRINSIC_DATA(avx2_permd, VPERM_2OP, X86ISD::VPERMV, 0), X86_INTRINSIC_DATA(avx2_permps, VPERM_2OP, X86ISD::VPERMV, 0), 
X86_INTRINSIC_DATA(avx2_phadd_d, INTR_TYPE_2OP, X86ISD::HADD, 0), + X86_INTRINSIC_DATA(avx2_phadd_sw, INTR_TYPE_2OP, X86ISD::HADDS, 0), X86_INTRINSIC_DATA(avx2_phadd_w, INTR_TYPE_2OP, X86ISD::HADD, 0), X86_INTRINSIC_DATA(avx2_phsub_d, INTR_TYPE_2OP, X86ISD::HSUB, 0), + X86_INTRINSIC_DATA(avx2_phsub_sw, INTR_TYPE_2OP, X86ISD::HSUBS, 0), X86_INTRINSIC_DATA(avx2_phsub_w, INTR_TYPE_2OP, X86ISD::HSUB, 0), X86_INTRINSIC_DATA(avx2_pmadd_ub_sw, INTR_TYPE_2OP, X86ISD::VPMADDUBSW, 0), X86_INTRINSIC_DATA(avx2_pmadd_wd, INTR_TYPE_2OP, X86ISD::VPMADDWD, 0), @@ -2017,11 +2019,13 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86_INTRINSIC_DATA(ssse3_phadd_d, INTR_TYPE_CAST_MMX, 0, 0), X86_INTRINSIC_DATA(ssse3_phadd_d_128, INTR_TYPE_2OP, X86ISD::HADD, 0), X86_INTRINSIC_DATA(ssse3_phadd_sw, INTR_TYPE_CAST_MMX, 0, 0), + X86_INTRINSIC_DATA(ssse3_phadd_sw_128, INTR_TYPE_2OP, X86ISD::HADDS, 0), X86_INTRINSIC_DATA(ssse3_phadd_w, INTR_TYPE_CAST_MMX, 0, 0), X86_INTRINSIC_DATA(ssse3_phadd_w_128, INTR_TYPE_2OP, X86ISD::HADD, 0), X86_INTRINSIC_DATA(ssse3_phsub_d, INTR_TYPE_CAST_MMX, 0, 0), X86_INTRINSIC_DATA(ssse3_phsub_d_128, INTR_TYPE_2OP, X86ISD::HSUB, 0), X86_INTRINSIC_DATA(ssse3_phsub_sw, INTR_TYPE_CAST_MMX, 0, 0), + X86_INTRINSIC_DATA(ssse3_phsub_sw_128, INTR_TYPE_2OP, X86ISD::HSUBS, 0), X86_INTRINSIC_DATA(ssse3_phsub_w, INTR_TYPE_CAST_MMX, 0, 0), X86_INTRINSIC_DATA(ssse3_phsub_w_128, INTR_TYPE_2OP, X86ISD::HSUB, 0), X86_INTRINSIC_DATA(ssse3_pmadd_ub_sw, INTR_TYPE_CAST_MMX, 0, 0), diff --git a/llvm/lib/Target/XCore/XCore.td b/llvm/lib/Target/XCore/XCore.td index a97b3dd1d0a2b..fa8b9fe26bbe1 100644 --- a/llvm/lib/Target/XCore/XCore.td +++ b/llvm/lib/Target/XCore/XCore.td @@ -24,6 +24,8 @@ include "XCoreRegisterInfo.td" include "XCoreInstrInfo.td" include "XCoreCallingConv.td" +defm : RemapAllTargetPseudoPointerOperands; + def XCoreInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git 
a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 4ef885e19101e..eecf42f46f88b 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -44,6 +44,8 @@ include "XtensaCallingConv.td" include "XtensaInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def XtensaInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/TargetParser/X86TargetParser.cpp b/llvm/lib/TargetParser/X86TargetParser.cpp index 02c33b0af2e2f..2810849e4af9e 100644 --- a/llvm/lib/TargetParser/X86TargetParser.cpp +++ b/llvm/lib/TargetParser/X86TargetParser.cpp @@ -544,10 +544,6 @@ constexpr FeatureBitset ImpliedFeaturesWBNOINVD = {}; constexpr FeatureBitset ImpliedFeaturesVZEROUPPER = {}; constexpr FeatureBitset ImpliedFeaturesX87 = {}; constexpr FeatureBitset ImpliedFeaturesXSAVE = {}; -constexpr FeatureBitset ImpliedFeaturesDUMMYFEATURE1 = {}; -constexpr FeatureBitset ImpliedFeaturesDUMMYFEATURE2 = {}; -constexpr FeatureBitset ImpliedFeaturesDUMMYFEATURE3 = {}; -constexpr FeatureBitset ImpliedFeaturesDUMMYFEATURE4 = {}; // Not really CPU features, but need to be in the table because clang uses // target features to communicate them to the backend. 
@@ -659,9 +655,14 @@ constexpr FeatureBitset ImpliedFeaturesNF = {}; constexpr FeatureBitset ImpliedFeaturesCF = {}; constexpr FeatureBitset ImpliedFeaturesZU = {}; +constexpr FeatureBitset ImpliedFeaturesAPXF = + ImpliedFeaturesEGPR | ImpliedFeaturesPush2Pop2 | ImpliedFeaturesPPX | + ImpliedFeaturesNDD | ImpliedFeaturesCCMP | ImpliedFeaturesNF | + ImpliedFeaturesCF | ImpliedFeaturesZU; + constexpr FeatureBitset ImpliedFeaturesMOVRS = {}; -constexpr FeatureInfo FeatureInfos[X86::CPU_FEATURE_MAX] = { +constexpr FeatureInfo FeatureInfos[] = { #define X86_FEATURE(ENUM, STR) {{"+" STR}, ImpliedFeatures##ENUM}, #include "llvm/TargetParser/X86TargetParser.def" }; @@ -761,10 +762,9 @@ llvm::X86::getCpuSupportsMask(ArrayRef FeatureStrs) { std::array FeatureMask{}; for (StringRef FeatureStr : FeatureStrs) { unsigned Feature = StringSwitch(FeatureStr) -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \ - .Case(STR, llvm::X86::FEATURE_##ENUM) -#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY) \ - .Case(STR, llvm::X86::FEATURE_##ENUM) +#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) .Case(STR, ABI_VALUE) +#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY, ABI_VALUE) \ + .Case(STR, ABI_VALUE) #include "llvm/TargetParser/X86TargetParser.def" ; assert(Feature / 32 < FeatureMask.size()); @@ -777,15 +777,14 @@ unsigned llvm::X86::getFeaturePriority(ProcessorFeatures Feat) { #ifndef NDEBUG // Check that priorities are set properly in the .def file. We expect that // "compat" features are assigned non-duplicate consecutive priorities - // starting from one (1, ..., 37) and multiple zeros. -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) PRIORITY, + // starting from one (1, ..., MAX_PRIORITY) and multiple zeros. 
+#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) PRIORITY, unsigned Priorities[] = { #include "llvm/TargetParser/X86TargetParser.def" }; std::array HelperList; - const size_t MaxPriority = 37; - std::iota(HelperList.begin(), HelperList.begin() + MaxPriority + 1, 0); - for (size_t i = MaxPriority + 1; i != std::size(Priorities); ++i) + std::iota(HelperList.begin(), HelperList.begin() + MAX_PRIORITY + 1, 0); + for (size_t i = MAX_PRIORITY + 1; i != std::size(Priorities); ++i) HelperList[i] = 0; assert(std::is_permutation(HelperList.begin(), HelperList.end(), std::begin(Priorities), std::end(Priorities)) && @@ -793,7 +792,7 @@ unsigned llvm::X86::getFeaturePriority(ProcessorFeatures Feat) { #endif switch (Feat) { -#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \ +#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY, ABI_VALUE) \ case X86::FEATURE_##ENUM: \ return PRIORITY; #include "llvm/TargetParser/X86TargetParser.def" diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp index b575d76e897d2..7ed8fb68f107e 100644 --- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp +++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp @@ -1466,6 +1466,329 @@ static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI, return false; } +/// Match high part of long multiplication. +/// +/// Considering a multiply made up of high and low parts, we can split the +/// multiply into: +/// x * y == (xh*T + xl) * (yh*T + yl) +/// where xh == x>>32 and xl == x & 0xffffffff. T = 2^32. +/// This expands to +/// xh*yh*T*T + xh*yl*T + xl*yh*T + xl*yl +/// which can be drawn as +/// [ xh*yh ] +/// [ xh*yl ] +/// [ xl*yh ] +/// [ xl*yl ] +/// We are looking for the "high" half, which is xh*yh + xh*yl>>32 + xl*yh>>32 + +/// some carrys. The carry makes this difficult and there are multiple ways of +/// representing it. 
The ones we attempt to support here are: +/// Carry: xh*yh + carry + lowsum +/// carry = lowsum < xh*yl ? 0x1000000 : 0 +/// lowsum = xh*yl + xl*yh + (xl*yl>>32) +/// Ladder: xh*yh + c2>>32 + c3>>32 +/// c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh +/// or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xl*yh +/// Carry4: xh*yh + carry + crosssum>>32 + (xl*yl + crosssum&0xffffffff) >> 32 +/// crosssum = xh*yl + xl*yh +/// carry = crosssum < xh*yl ? 0x1000000 : 0 +/// Ladder4: xh*yh + (xl*yh)>>32 + (xh*yl)>>32 + low>>32; +/// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff +/// +/// They all start by matching xh*yh + 2 or 3 other operands. The bottom of the +/// tree is xh*yh, xh*yl, xl*yh and xl*yl. +static bool foldMulHigh(Instruction &I) { + Type *Ty = I.getType(); + if (!Ty->isIntOrIntVectorTy()) + return false; + + unsigned BitWidth = Ty->getScalarSizeInBits(); + APInt LowMask = APInt::getLowBitsSet(BitWidth, BitWidth / 2); + if (BitWidth % 2 != 0) + return false; + + auto CreateMulHigh = [&](Value *X, Value *Y) { + IRBuilder<> Builder(&I); + Type *NTy = Ty->getWithNewBitWidth(BitWidth * 2); + Value *XExt = Builder.CreateZExt(X, NTy); + Value *YExt = Builder.CreateZExt(Y, NTy); + Value *Mul = Builder.CreateMul(XExt, YExt, "", /*HasNUW=*/true); + Value *High = Builder.CreateLShr(Mul, BitWidth); + Value *Res = Builder.CreateTrunc(High, Ty, "", /*HasNUW=*/true); + Res->takeName(&I); + I.replaceAllUsesWith(Res); + LLVM_DEBUG(dbgs() << "Created long multiply from parts of " << *X << " and " + << *Y << "\n"); + return true; + }; + + // Common check routines for X_lo*Y_lo and X_hi*Y_lo + auto CheckLoLo = [&](Value *XlYl, Value *X, Value *Y) { + return match(XlYl, m_c_Mul(m_And(m_Specific(X), m_SpecificInt(LowMask)), + m_And(m_Specific(Y), m_SpecificInt(LowMask)))); + }; + auto CheckHiLo = [&](Value *XhYl, Value *X, Value *Y) { + return match(XhYl, + m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(BitWidth / 2)), + m_And(m_Specific(Y), 
m_SpecificInt(LowMask)))); + }; + + auto FoldMulHighCarry = [&](Value *X, Value *Y, Instruction *Carry, + Instruction *B) { + // Looking for LowSum >> 32 and carry (select) + if (Carry->getOpcode() != Instruction::Select) + std::swap(Carry, B); + + // Carry = LowSum < XhYl ? 0x100000000 : 0 + Value *LowSum, *XhYl; + if (!match(Carry, + m_OneUse(m_Select( + m_OneUse(m_SpecificICmp(ICmpInst::ICMP_ULT, m_Value(LowSum), + m_Value(XhYl))), + m_SpecificInt(APInt::getOneBitSet(BitWidth, BitWidth / 2)), + m_Zero())))) + return false; + + // XhYl can be Xh*Yl or Xl*Yh + if (!CheckHiLo(XhYl, X, Y)) { + if (CheckHiLo(XhYl, Y, X)) + std::swap(X, Y); + else + return false; + } + if (XhYl->hasNUsesOrMore(3)) + return false; + + // B = LowSum >> 32 + if (!match(B, m_OneUse(m_LShr(m_Specific(LowSum), + m_SpecificInt(BitWidth / 2)))) || + LowSum->hasNUsesOrMore(3)) + return false; + + // LowSum = XhYl + XlYh + XlYl>>32 + Value *XlYh, *XlYl; + auto XlYlHi = m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)); + if (!match(LowSum, + m_c_Add(m_Specific(XhYl), + m_OneUse(m_c_Add(m_OneUse(m_Value(XlYh)), XlYlHi)))) && + !match(LowSum, m_c_Add(m_OneUse(m_Value(XlYh)), + m_OneUse(m_c_Add(m_Specific(XhYl), XlYlHi)))) && + !match(LowSum, + m_c_Add(XlYlHi, m_OneUse(m_c_Add(m_Specific(XhYl), + m_OneUse(m_Value(XlYh))))))) + return false; + + // Check XlYl and XlYh + if (!CheckLoLo(XlYl, X, Y)) + return false; + if (!CheckHiLo(XlYh, Y, X)) + return false; + + return CreateMulHigh(X, Y); + }; + + auto FoldMulHighLadder = [&](Value *X, Value *Y, Instruction *A, + Instruction *B) { + // xh*yh + c2>>32 + c3>>32 + // c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh + // or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xh*yl + Value *XlYh, *XhYl, *XlYl, *C2, *C3; + // Strip off the two expected shifts. 
+ if (!match(A, m_LShr(m_Value(C2), m_SpecificInt(BitWidth / 2))) || + !match(B, m_LShr(m_Value(C3), m_SpecificInt(BitWidth / 2)))) + return false; + + if (match(C3, m_c_Add(m_Add(m_Value(), m_Value()), m_Value()))) + std::swap(C2, C3); + // Try to match c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32) + if (match(C2, + m_c_Add(m_c_Add(m_And(m_Specific(C3), m_SpecificInt(LowMask)), + m_Value(XlYh)), + m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)))) || + match(C2, m_c_Add(m_c_Add(m_And(m_Specific(C3), m_SpecificInt(LowMask)), + m_LShr(m_Value(XlYl), + m_SpecificInt(BitWidth / 2))), + m_Value(XlYh))) || + match(C2, m_c_Add(m_c_Add(m_LShr(m_Value(XlYl), + m_SpecificInt(BitWidth / 2)), + m_Value(XlYh)), + m_And(m_Specific(C3), m_SpecificInt(LowMask))))) { + XhYl = C3; + } else { + // Match c3 = c2&0xffffffff + xl*yh + if (!match(C3, m_c_Add(m_And(m_Specific(C2), m_SpecificInt(LowMask)), + m_Value(XlYh)))) + std::swap(C2, C3); + if (!match(C3, m_c_Add(m_OneUse( + m_And(m_Specific(C2), m_SpecificInt(LowMask))), + m_Value(XlYh))) || + !C3->hasOneUse() || C2->hasNUsesOrMore(3)) + return false; + + // Match c2 = xh*yl + (xl*yl >> 32) + if (!match(C2, m_c_Add(m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)), + m_Value(XhYl)))) + return false; + } + + // Match XhYl and XlYh - they can appear either way around. + if (!CheckHiLo(XlYh, Y, X)) + std::swap(XlYh, XhYl); + if (!CheckHiLo(XlYh, Y, X)) + return false; + if (!CheckHiLo(XhYl, X, Y)) + return false; + if (!CheckLoLo(XlYl, X, Y)) + return false; + + return CreateMulHigh(X, Y); + }; + + auto FoldMulHighLadder4 = [&](Value *X, Value *Y, Instruction *A, + Instruction *B, Instruction *C) { + /// Ladder4: xh*yh + (xl*yh)>>32 + (xh+yl)>>32 + low>>32; + /// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff + + // Find A = Low >> 32 and B/C = XhYl>>32, XlYh>>32. 
+ auto ShiftAdd = + m_LShr(m_Add(m_Value(), m_Value()), m_SpecificInt(BitWidth / 2)); + if (!match(A, ShiftAdd)) + std::swap(A, B); + if (!match(A, ShiftAdd)) + std::swap(A, C); + Value *Low; + if (!match(A, m_LShr(m_OneUse(m_Value(Low)), m_SpecificInt(BitWidth / 2)))) + return false; + + // Match B == XhYl>>32 and C == XlYh>>32 + Value *XhYl, *XlYh; + if (!match(B, m_LShr(m_Value(XhYl), m_SpecificInt(BitWidth / 2))) || + !match(C, m_LShr(m_Value(XlYh), m_SpecificInt(BitWidth / 2)))) + return false; + if (!CheckHiLo(XhYl, X, Y)) + std::swap(XhYl, XlYh); + if (!CheckHiLo(XhYl, X, Y) || XhYl->hasNUsesOrMore(3)) + return false; + if (!CheckHiLo(XlYh, Y, X) || XlYh->hasNUsesOrMore(3)) + return false; + + // Match Low as XlYl>>32 + XhYl&0xffffffff + XlYh&0xffffffff + Value *XlYl; + if (!match( + Low, + m_c_Add( + m_OneUse(m_c_Add( + m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))), + m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))), + m_OneUse( + m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))) && + !match( + Low, + m_c_Add( + m_OneUse(m_c_Add( + m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))), + m_OneUse( + m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))), + m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))) && + !match( + Low, + m_c_Add( + m_OneUse(m_c_Add( + m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))), + m_OneUse( + m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))), + m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask)))))) + return false; + if (!CheckLoLo(XlYl, X, Y)) + return false; + + return CreateMulHigh(X, Y); + }; + + auto FoldMulHighCarry4 = [&](Value *X, Value *Y, Instruction *Carry, + Instruction *B, Instruction *C) { + // xh*yh + carry + crosssum>>32 + (xl*yl + crosssum&0xffffffff) >> 32 + // crosssum = xh*yl+xl*yh + // carry = crosssum < xh*yl ? 
0x1000000 : 0 + if (Carry->getOpcode() != Instruction::Select) + std::swap(Carry, B); + if (Carry->getOpcode() != Instruction::Select) + std::swap(Carry, C); + + // Carry = CrossSum < XhYl ? 0x100000000 : 0 + Value *CrossSum, *XhYl; + if (!match(Carry, + m_OneUse(m_Select( + m_OneUse(m_SpecificICmp(ICmpInst::ICMP_ULT, + m_Value(CrossSum), m_Value(XhYl))), + m_SpecificInt(APInt::getOneBitSet(BitWidth, BitWidth / 2)), + m_Zero())))) + return false; + + if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2)))) + std::swap(B, C); + if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2)))) + return false; + + Value *XlYl, *LowAccum; + if (!match(C, m_LShr(m_Value(LowAccum), m_SpecificInt(BitWidth / 2))) || + !match(LowAccum, m_c_Add(m_OneUse(m_LShr(m_Value(XlYl), + m_SpecificInt(BitWidth / 2))), + m_OneUse(m_And(m_Specific(CrossSum), + m_SpecificInt(LowMask))))) || + LowAccum->hasNUsesOrMore(3)) + return false; + if (!CheckLoLo(XlYl, X, Y)) + return false; + + if (!CheckHiLo(XhYl, X, Y)) + std::swap(X, Y); + if (!CheckHiLo(XhYl, X, Y)) + return false; + Value *XlYh; + if (!match(CrossSum, m_c_Add(m_Specific(XhYl), m_OneUse(m_Value(XlYh)))) || + !CheckHiLo(XlYh, Y, X) || CrossSum->hasNUsesOrMore(4) || + XhYl->hasNUsesOrMore(3)) + return false; + + return CreateMulHigh(X, Y); + }; + + // X and Y are the two inputs, A, B and C are other parts of the pattern + // (crosssum>>32, carry, etc). 
+ Value *X, *Y; + Instruction *A, *B, *C; + auto HiHi = m_OneUse(m_Mul(m_LShr(m_Value(X), m_SpecificInt(BitWidth / 2)), + m_LShr(m_Value(Y), m_SpecificInt(BitWidth / 2)))); + if ((match(&I, m_c_Add(HiHi, m_OneUse(m_Add(m_Instruction(A), + m_Instruction(B))))) || + match(&I, m_c_Add(m_Instruction(A), + m_OneUse(m_c_Add(HiHi, m_Instruction(B)))))) && + A->hasOneUse() && B->hasOneUse()) + if (FoldMulHighCarry(X, Y, A, B) || FoldMulHighLadder(X, Y, A, B)) + return true; + + if ((match(&I, m_c_Add(HiHi, m_OneUse(m_c_Add( + m_Instruction(A), + m_OneUse(m_Add(m_Instruction(B), + m_Instruction(C))))))) || + match(&I, m_c_Add(m_Instruction(A), + m_OneUse(m_c_Add( + HiHi, m_OneUse(m_Add(m_Instruction(B), + m_Instruction(C))))))) || + match(&I, m_c_Add(m_Instruction(A), + m_OneUse(m_c_Add( + m_Instruction(B), + m_OneUse(m_c_Add(HiHi, m_Instruction(C))))))) || + match(&I, + m_c_Add(m_OneUse(m_c_Add(HiHi, m_Instruction(A))), + m_OneUse(m_Add(m_Instruction(B), m_Instruction(C)))))) && + A->hasOneUse() && B->hasOneUse() && C->hasOneUse()) + return FoldMulHighCarry4(X, Y, A, B, C) || + FoldMulHighLadder4(X, Y, A, B, C); + + return false; +} + /// This is the entry point for folds that could be implemented in regular /// InstCombine, but they are separated because they are not expected to /// occur frequently and/or have more than a constant-length pattern match. @@ -1495,6 +1818,7 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT, MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT); MadeChange |= foldPatternedLoads(I, DL); MadeChange |= foldICmpOrChain(I, DL, TTI, AA, DT); + MadeChange |= foldMulHigh(I); // NOTE: This function introduces erasing of the instruction `I`, so it // needs to be called at the end of this sequence, otherwise we may make // bugs. 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 8e4edefec42fd..743c4f574e131 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3077,6 +3077,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { } case Intrinsic::ptrauth_auth: case Intrinsic::ptrauth_resign: { + // We don't support this optimization on intrinsic calls with deactivation + // symbols, which are represented using operand bundles. + if (II->hasOperandBundles()) + break; + // (sign|resign) + (auth|resign) can be folded by omitting the middle // sign+auth component if the key and discriminator match. bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; @@ -3088,6 +3093,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { // whatever we replace this sequence with. Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; if (const auto *CI = dyn_cast(Ptr)) { + // We don't support this optimization on intrinsic calls with deactivation + // symbols, which are represented using operand bundles. 
+ if (CI->hasOperandBundles()) + break; + BasePtr = CI->getArgOperand(0); if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) @@ -3110,9 +3120,10 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { if (NeedSign && isa(II->getArgOperand(4))) { auto *SignKey = cast(II->getArgOperand(3)); auto *SignDisc = cast(II->getArgOperand(4)); - auto *SignAddrDisc = ConstantPointerNull::get(Builder.getPtrTy()); + auto *Null = ConstantPointerNull::get(Builder.getPtrTy()); auto *NewCPA = ConstantPtrAuth::get(CPA->getPointer(), SignKey, - SignDisc, SignAddrDisc); + SignDisc, /*AddrDisc=*/Null, + /*DeactivationSymbol=*/Null); replaceInstUsesWith( *II, ConstantExpr::getPointerCast(NewCPA, II->getType())); return eraseInstFromFunction(*II); @@ -4005,6 +4016,27 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { } break; } + case Intrinsic::experimental_get_vector_length: { + // get.vector.length(Cnt, MaxLanes) --> Cnt when Cnt <= MaxLanes + unsigned BitWidth = + std::max(II->getArgOperand(0)->getType()->getScalarSizeInBits(), + II->getType()->getScalarSizeInBits()); + ConstantRange Cnt = + computeConstantRangeIncludingKnownBits(II->getArgOperand(0), false, + SQ.getWithInstruction(II)) + .zextOrTrunc(BitWidth); + ConstantRange MaxLanes = cast(II->getArgOperand(1)) + ->getValue() + .zextOrTrunc(Cnt.getBitWidth()); + if (cast(II->getArgOperand(2))->isOne()) + MaxLanes = MaxLanes.multiply( + getVScaleRange(II->getFunction(), Cnt.getBitWidth())); + + if (Cnt.icmp(CmpInst::ICMP_ULE, MaxLanes)) + return replaceInstUsesWith( + *II, Builder.CreateZExtOrTrunc(II->getArgOperand(0), II->getType())); + return nullptr; + } default: { // Handle target specific intrinsics std::optional V = targetInstCombineIntrinsic(*II); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index cf6e7315114dc..33eee8e059486 100644 --- 
a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -5886,6 +5886,12 @@ static void collectOffsetOp(Value *V, SmallVectorImpl &Offsets, Offsets.emplace_back(Instruction::Xor, Inst->getOperand(1)); Offsets.emplace_back(Instruction::Xor, Inst->getOperand(0)); break; + case Instruction::Shl: + if (Inst->hasNoSignedWrap()) + Offsets.emplace_back(Instruction::AShr, Inst->getOperand(1)); + if (Inst->hasNoUnsignedWrap()) + Offsets.emplace_back(Instruction::LShr, Inst->getOperand(1)); + break; case Instruction::Select: if (AllowRecursion) { collectOffsetOp(Inst->getOperand(1), Offsets, /*AllowRecursion=*/false); @@ -5942,9 +5948,31 @@ static Instruction *foldICmpEqualityWithOffset(ICmpInst &I, collectOffsetOp(Op1, OffsetOps, /*AllowRecursion=*/true); auto ApplyOffsetImpl = [&](Value *V, unsigned BinOpc, Value *RHS) -> Value * { + switch (BinOpc) { + // V = shl nsw X, RHS => X = ashr V, RHS + case Instruction::AShr: { + const APInt *CV, *CRHS; + if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) && + CV->ashr(*CRHS).shl(*CRHS) == *CV) && + !match(V, m_NSWShl(m_Value(), m_Specific(RHS)))) + return nullptr; + break; + } + // V = shl nuw X, RHS => X = lshr V, RHS + case Instruction::LShr: { + const APInt *CV, *CRHS; + if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) && + CV->lshr(*CRHS).shl(*CRHS) == *CV) && + !match(V, m_NUWShl(m_Value(), m_Specific(RHS)))) + return nullptr; + break; + } + default: + break; + } + Value *Simplified = simplifyBinOp(BinOpc, V, RHS, SQ); - // Avoid infinite loops by checking if RHS is an identity for the BinOp. - if (!Simplified || Simplified == V) + if (!Simplified) return nullptr; // Reject constant expressions as they don't simplify things. 
if (isa(Simplified) && !match(Simplified, m_ImmConstant())) diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp index 9239ae8741afb..b5a8f79e26436 100644 --- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -178,6 +178,8 @@ getRuntimeCallName(const BoundsCheckingPass::Options::Runtime &Opts) { Name += "_minimal"; if (!Opts.MayReturn) Name += "_abort"; + else if (Opts.HandlerPreserveAllRegs) + Name += "_preserve"; return Name; } @@ -267,7 +269,10 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI, TrapCall->setDoesNotReturn(); IRB.CreateUnreachable(); } - + // The preserve-all logic is somewhat duplicated in CGExpr.cpp for + // local-bounds. Make sure to change that too. + if (Opts.Rt && Opts.Rt->HandlerPreserveAllRegs && MayReturn) + TrapCall->setCallingConv(CallingConv::PreserveAll); if (!MayReturn && SingleTrapBB && !DebugTrapBB) ReuseTrapBB = TrapBB; diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp index b46527eb1057b..400cb1ecb5e03 100644 --- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -198,195 +198,267 @@ static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) { return true; } -// Ensure we stay within the bounds of fp values that can be represented as -// integers without gaps, which are 2^24 and 2^53 for IEEE-754 single and double -// precision respectively (both on negative and positive side). 
-static bool isRepresentableAsExactInteger(ConstantFP *FPVal, int64_t IntVal) { - const auto &InitValueFltSema = FPVal->getValueAPF().getSemantics(); - if (!APFloat::isIEEELikeFP(InitValueFltSema)) +/// Ensure we stay within the bounds of fp values that can be represented as +/// integers without gaps, which are 2^24 and 2^53 for IEEE-754 single and +/// double precision respectively (both on negative and positive side). +static bool isRepresentableAsExactInteger(const APFloat &FPVal, + int64_t IntVal) { + const auto &FltSema = FPVal.getSemantics(); + if (!APFloat::isIEEELikeFP(FltSema)) return false; + return isUIntN(APFloat::semanticsPrecision(FltSema), AbsoluteValue(IntVal)); +} + +/// Represents a floating-point induction variable pattern that may be +/// convertible to integer form. +struct FloatingPointIV { + APFloat InitValue; + APFloat IncrValue; + APFloat ExitValue; + FCmpInst *Compare; + BinaryOperator *Add; + + FloatingPointIV(APFloat Init, APFloat Incr, APFloat Exit, FCmpInst *Compare, + BinaryOperator *Add) + : InitValue(std::move(Init)), IncrValue(std::move(Incr)), + ExitValue(std::move(Exit)), Compare(Compare), Add(Add) {} +}; - return isUIntN(APFloat::semanticsPrecision(InitValueFltSema), - AbsoluteValue(IntVal)); +/// Represents the integer values for a converted IV. 
+struct IntegerIV { + int64_t InitValue; + int64_t IncrValue; + int64_t ExitValue; + CmpInst::Predicate NewPred; +}; + +static CmpInst::Predicate getIntegerPredicate(CmpInst::Predicate FPPred) { + switch (FPPred) { + case CmpInst::FCMP_OEQ: + case CmpInst::FCMP_UEQ: + return CmpInst::ICMP_EQ; + case CmpInst::FCMP_ONE: + case CmpInst::FCMP_UNE: + return CmpInst::ICMP_NE; + case CmpInst::FCMP_OGT: + case CmpInst::FCMP_UGT: + return CmpInst::ICMP_SGT; + case CmpInst::FCMP_OGE: + case CmpInst::FCMP_UGE: + return CmpInst::ICMP_SGE; + case CmpInst::FCMP_OLT: + case CmpInst::FCMP_ULT: + return CmpInst::ICMP_SLT; + case CmpInst::FCMP_OLE: + case CmpInst::FCMP_ULE: + return CmpInst::ICMP_SLE; + default: + return CmpInst::BAD_ICMP_PREDICATE; + } } -/// If the loop has floating induction variable then insert corresponding -/// integer induction variable if possible. -/// For example, -/// for(double i = 0; i < 10000; ++i) -/// bar(i) -/// is converted into -/// for(int i = 0; i < 10000; ++i) -/// bar((double)i); -bool IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) { +/// Analyze a PN to determine whether it represents a simple floating-point +/// induction variable, with constant fp init, increment, and exit values. +/// +/// Returns a FloatingPointIV struct if matched, std::nullopt otherwise. +static std::optional +maybeFloatingPointRecurrence(Loop *L, PHINode *PN) { + // Identify incoming and backedge for the PN. unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); - unsigned BackEdge = IncomingEdge^1; + unsigned BackEdge = IncomingEdge ^ 1; // Check incoming value. auto *InitValueVal = dyn_cast(PN->getIncomingValue(IncomingEdge)); - - int64_t InitValue; - if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue) || - !isRepresentableAsExactInteger(InitValueVal, InitValue)) - return false; + if (!InitValueVal) + return std::nullopt; // Check IV increment. 
Reject this PN if increment operation is not // an add or increment value can not be represented by an integer. auto *Incr = dyn_cast(PN->getIncomingValue(BackEdge)); - if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return false; + if (!Incr || Incr->getOpcode() != Instruction::FAdd) + return std::nullopt; // If this is not an add of the PHI with a constantfp, or if the constant fp // is not an integer, bail out. - ConstantFP *IncValueVal = dyn_cast(Incr->getOperand(1)); - int64_t IncValue; - if (IncValueVal == nullptr || Incr->getOperand(0) != PN || - !ConvertToSInt(IncValueVal->getValueAPF(), IncValue)) - return false; + auto *IncValueVal = dyn_cast(Incr->getOperand(1)); + if (!IncValueVal || Incr->getOperand(0) != PN) + return std::nullopt; // Check Incr uses. One user is PN and the other user is an exit condition // used by the conditional terminator. - Value::user_iterator IncrUse = Incr->user_begin(); - Instruction *U1 = cast(*IncrUse++); - if (IncrUse == Incr->user_end()) return false; - Instruction *U2 = cast(*IncrUse++); - if (IncrUse != Incr->user_end()) return false; + // TODO: Should relax this, so as to allow any `fpext` that may occur. + if (!Incr->hasNUses(2)) + return std::nullopt; // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't // only used by a branch, we can't transform it. - FCmpInst *Compare = dyn_cast(U1); - if (!Compare) - Compare = dyn_cast(U2); - if (!Compare || !Compare->hasOneUse() || - !isa(Compare->user_back())) - return false; + auto It = llvm::find_if(Incr->users(), + [](const User *U) { return isa(U); }); + if (It == Incr->users().end()) + return std::nullopt; - BranchInst *TheBr = cast(Compare->user_back()); + FCmpInst *Compare = cast(*It); + if (!Compare->hasOneUse()) + return std::nullopt; // We need to verify that the branch actually controls the iteration count // of the loop. If not, the new IV can overflow and no one will notice. 
// The branch block must be in the loop and one of the successors must be out // of the loop. - assert(TheBr->isConditional() && "Can't use fcmp if not conditional"); - if (!L->contains(TheBr->getParent()) || - (L->contains(TheBr->getSuccessor(0)) && - L->contains(TheBr->getSuccessor(1)))) - return false; + auto *BI = dyn_cast(Compare->user_back()); + if (!BI) + return std::nullopt; + + assert(BI->isConditional() && "Can't use fcmp if not conditional"); + if (!L->contains(BI->getParent()) || + (L->contains(BI->getSuccessor(0)) && L->contains(BI->getSuccessor(1)))) + return std::nullopt; // If it isn't a comparison with an integer-as-fp (the exit value), we can't // transform it. - ConstantFP *ExitValueVal = dyn_cast(Compare->getOperand(1)); - int64_t ExitValue; - if (ExitValueVal == nullptr || - !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue) || - !isRepresentableAsExactInteger(ExitValueVal, ExitValue)) - return false; + auto *ExitValueVal = dyn_cast(Compare->getOperand(1)); + if (!ExitValueVal) + return std::nullopt; - // Find new predicate for integer comparison. - CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE; - switch (Compare->getPredicate()) { - default: return false; // Unknown comparison. 
- case CmpInst::FCMP_OEQ: - case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break; - case CmpInst::FCMP_ONE: - case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break; - case CmpInst::FCMP_OGT: - case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break; - case CmpInst::FCMP_OGE: - case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break; - case CmpInst::FCMP_OLT: - case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break; - case CmpInst::FCMP_OLE: - case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break; - } + return FloatingPointIV(InitValueVal->getValueAPF(), + IncValueVal->getValueAPF(), + ExitValueVal->getValueAPF(), Compare, Incr); +} + +/// Ensure that the floating-point IV can be converted to a semantics-preserving +/// signed 32-bit integer IV. +/// +/// Returns a IntegerIV struct if possible, std::nullopt otherwise. +static std::optional +tryConvertToIntegerIV(const FloatingPointIV &FPIV) { + // Convert floating-point predicate to integer. + auto NewPred = getIntegerPredicate(FPIV.Compare->getPredicate()); + if (NewPred == CmpInst::BAD_ICMP_PREDICATE) + return std::nullopt; + + // Convert APFloat values to signed integers. + int64_t InitValue, IncrValue, ExitValue; + if (!ConvertToSInt(FPIV.InitValue, InitValue) || + !ConvertToSInt(FPIV.IncrValue, IncrValue) || + !ConvertToSInt(FPIV.ExitValue, ExitValue)) + return std::nullopt; + + // Bail out if integers cannot be represented exactly. + if (!isRepresentableAsExactInteger(FPIV.InitValue, InitValue) || + !isRepresentableAsExactInteger(FPIV.ExitValue, ExitValue)) + return std::nullopt; // We convert the floating point induction variable to a signed i32 value if - // we can. This is only safe if the comparison will not overflow in a way - // that won't be trapped by the integer equivalent operations. Check for this - // now. + // we can. This is only safe if the comparison will not overflow in a way that + // won't be trapped by the integer equivalent operations. Check for this now. 
// TODO: We could use i64 if it is native and the range requires it. // The start/stride/exit values must all fit in signed i32. - if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue)) - return false; + if (!isInt<32>(InitValue) || !isInt<32>(IncrValue) || !isInt<32>(ExitValue)) + return std::nullopt; // If not actually striding (add x, 0.0), avoid touching the code. - if (IncValue == 0) - return false; + if (IncrValue == 0) + return std::nullopt; // Positive and negative strides have different safety conditions. - if (IncValue > 0) { + if (IncrValue > 0) { // If we have a positive stride, we require the init to be less than the // exit value. if (InitValue >= ExitValue) - return false; + return std::nullopt; - uint32_t Range = uint32_t(ExitValue-InitValue); + uint32_t Range = uint32_t(ExitValue - InitValue); // Check for infinite loop, either: // while (i <= Exit) or until (i > Exit) if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) { - if (++Range == 0) return false; // Range overflows. + if (++Range == 0) + return std::nullopt; // Range overflows. } - unsigned Leftover = Range % uint32_t(IncValue); + unsigned Leftover = Range % uint32_t(IncrValue); // If this is an equality comparison, we require that the strided value // exactly land on the exit value, otherwise the IV condition will wrap // around and do things the fp IV wouldn't. if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) && Leftover != 0) - return false; + return std::nullopt; // If the stride would wrap around the i32 before exiting, we can't // transform the IV. - if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue) - return false; + if (Leftover != 0 && int32_t(ExitValue + IncrValue) < ExitValue) + return std::nullopt; } else { // If we have a negative stride, we require the init to be greater than the // exit value. 
if (InitValue <= ExitValue) - return false; + return std::nullopt; - uint32_t Range = uint32_t(InitValue-ExitValue); + uint32_t Range = uint32_t(InitValue - ExitValue); // Check for infinite loop, either: // while (i >= Exit) or until (i < Exit) if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) { - if (++Range == 0) return false; // Range overflows. + if (++Range == 0) + return std::nullopt; // Range overflows. } - unsigned Leftover = Range % uint32_t(-IncValue); + unsigned Leftover = Range % uint32_t(-IncrValue); // If this is an equality comparison, we require that the strided value // exactly land on the exit value, otherwise the IV condition will wrap // around and do things the fp IV wouldn't. if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) && Leftover != 0) - return false; + return std::nullopt; // If the stride would wrap around the i32 before exiting, we can't // transform the IV. - if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue) - return false; + if (Leftover != 0 && int32_t(ExitValue + IncrValue) > ExitValue) + return std::nullopt; } + return IntegerIV{InitValue, IncrValue, ExitValue, NewPred}; +} + +/// Rewrite the floating-point IV as an integer IV. 
+static void canonicalizeToIntegerIV(Loop *L, PHINode *PN, + const FloatingPointIV &FPIV, + const IntegerIV &IIV, + const TargetLibraryInfo *TLI, + std::unique_ptr &MSSAU) { + unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); + unsigned BackEdge = IncomingEdge ^ 1; + IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext()); + auto *Incr = cast(PN->getIncomingValue(BackEdge)); + auto *BI = cast(FPIV.Compare->user_back()); + + LLVM_DEBUG(dbgs() << "INDVARS: Rewriting floating-point IV to integer IV:\n" + << " Init: " << IIV.InitValue << "\n" + << " Incr: " << IIV.IncrValue << "\n" + << " Exit: " << IIV.ExitValue << "\n" + << " Pred: " << CmpInst::getPredicateName(IIV.NewPred) + << "\n" + << " Original PN: " << *PN << "\n"); // Insert new integer induction variable. PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName() + ".int", PN->getIterator()); - NewPHI->addIncoming(ConstantInt::getSigned(Int32Ty, InitValue), + NewPHI->addIncoming(ConstantInt::getSigned(Int32Ty, IIV.InitValue), PN->getIncomingBlock(IncomingEdge)); NewPHI->setDebugLoc(PN->getDebugLoc()); Instruction *NewAdd = BinaryOperator::CreateAdd( - NewPHI, ConstantInt::getSigned(Int32Ty, IncValue), + NewPHI, ConstantInt::getSigned(Int32Ty, IIV.IncrValue), Incr->getName() + ".int", Incr->getIterator()); NewAdd->setDebugLoc(Incr->getDebugLoc()); NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge)); ICmpInst *NewCompare = new ICmpInst( - TheBr->getIterator(), NewPred, NewAdd, - ConstantInt::getSigned(Int32Ty, ExitValue), Compare->getName()); - NewCompare->setDebugLoc(Compare->getDebugLoc()); + BI->getIterator(), IIV.NewPred, NewAdd, + ConstantInt::getSigned(Int32Ty, IIV.ExitValue), FPIV.Compare->getName()); + NewCompare->setDebugLoc(FPIV.Compare->getDebugLoc()); // In the following deletions, PN may become dead and may be deleted. // Use a WeakTrackingVH to observe whether this happens. 
@@ -394,9 +466,9 @@ bool IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) { // Delete the old floating point exit comparison. The branch starts using the // new comparison. - NewCompare->takeName(Compare); - Compare->replaceAllUsesWith(NewCompare); - RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI, MSSAU.get()); + NewCompare->takeName(FPIV.Compare); + FPIV.Compare->replaceAllUsesWith(NewCompare); + RecursivelyDeleteTriviallyDeadInstructions(FPIV.Compare, TLI, MSSAU.get()); // Delete the old floating point increment. Incr->replaceAllUsesWith(PoisonValue::get(Incr->getType())); @@ -416,6 +488,28 @@ bool IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) { PN->replaceAllUsesWith(Conv); RecursivelyDeleteTriviallyDeadInstructions(PN, TLI, MSSAU.get()); } +} + +/// If the loop has a floating induction variable, then insert corresponding +/// integer induction variable if possible. For example, the following: +/// for(double i = 0; i < 10000; ++i) +/// bar(i) +/// is converted into +/// for(int i = 0; i < 10000; ++i) +/// bar((double)i); +bool IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) { + // See if the PN matches a floating-point IV pattern. + auto FPIV = maybeFloatingPointRecurrence(L, PN); + if (!FPIV) + return false; + + // Can we safely convert the floating-point values to integer ones? + auto IIV = tryConvertToIntegerIV(*FPIV); + if (!IIV) + return false; + + // Perform the rewriting. + canonicalizeToIntegerIV(L, PN, *FPIV, *IIV, TLI, MSSAU); return true; } @@ -1855,7 +1949,7 @@ bool IndVarSimplify::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) { // is that enough for *all* side effects? 
bool HasThreadLocalSideEffects = false; for (BasicBlock *BB : L->blocks()) - for (auto &I : *BB) + for (auto &I : *BB) { // TODO:isGuaranteedToTransfer if (I.mayHaveSideEffects()) { if (!LoopPredicationTraps) @@ -1873,6 +1967,18 @@ bool IndVarSimplify::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) { } } + // Skip if the loop has tokens referenced outside the loop to avoid + // changing convergence behavior. + if (I.getType()->isTokenTy()) { + for (User *U : I.users()) { + Instruction *UserInst = dyn_cast(U); + if (UserInst && !L->contains(UserInst)) { + return false; + } + } + } + } + bool Changed = false; // Finally, do the actual predication for all predicatable blocks. A couple // of notes here: diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp index e5399bdd767e2..e94ad1999e32a 100644 --- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp @@ -12,16 +12,17 @@ // effective in simplifying arithmetic statements derived from an unrolled loop. // It can also simplify the logic of SeparateConstOffsetFromGEP. // -// There are many optimizations we can perform in the domain of SLSR. -// We look for strength reduction candidates in the following forms: +// There are many optimizations we can perform in the domain of SLSR. This file +// for now contains only an initial step. Specifically, we look for strength +// reduction candidates in the following forms: // -// Form Add: B + i * S -// Form Mul: (B + i) * S -// Form GEP: &B[i * S] +// Form 1: B + i * S +// Form 2: (B + i) * S +// Form 3: &B[i * S] // // where S is an integer variable, and i is a constant integer. If we found two // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2 -// in a simpler way with respect to S1 (index delta). For example, +// in a simpler way with respect to S1. 
For example, // // S1: X = B + i * S // S2: Y = B + i' * S => X + (i' - i) * S @@ -34,29 +35,8 @@ // // Note: (i' - i) * S is folded to the extent possible. // -// For Add and GEP forms, we can also rewrite a candidate in a simpler way -// with respect to other dominating candidates if their B or S are different -// but other parts are the same. For example, -// -// Base Delta: -// S1: X = B + i * S -// S2: Y = B' + i * S => X + (B' - B) -// -// S1: X = &B [i * S] -// S2: Y = &B'[i * S] => X + (B' - B) -// -// Stride Delta: -// S1: X = B + i * S -// S2: Y = B + i * S' => X + i * (S' - S) -// -// S1: X = &B[i * S] -// S2: Y = &B[i * S'] => X + i * (S' - S) -// -// PS: Stride delta rewrite on Mul form is usually non-profitable, and Base -// delta rewrite sometimes is profitable, so we do not support them on Mul. -// // This rewriting is in general a good idea. The code patterns we focus on -// usually come from loop unrolling, so the delta is likely the same +// usually come from loop unrolling, so (i' - i) * S is likely the same // across iterations and can be reused. When that happens, the optimized form // takes only one add starting from the second iteration. // @@ -67,14 +47,19 @@ // TODO: // // - Floating point arithmetics when fast math is enabled. +// +// - SLSR may decrease ILP at the architecture level. Targets that are very +// sensitive to ILP may want to disable it. Having SLSR to consider ILP is +// left as future work. +// +// - When (i' - i) is constant but i and i' are not, we could still perform +// SLSR. 
#include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/DepthFirstIterator.h" -#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" @@ -101,19 +86,16 @@ #include #include #include -#include #include using namespace llvm; using namespace PatternMatch; -#define DEBUG_TYPE "slsr" - static const unsigned UnknownAddressSpace = std::numeric_limits::max(); DEBUG_COUNTER(StraightLineStrengthReduceCounter, "slsr-counter", - "Controls whether rewriteCandidate is executed."); + "Controls whether rewriteCandidateWithBasis is executed."); namespace { @@ -160,23 +142,15 @@ class StraightLineStrengthReduce { GEP, // &B[..][i * S][..] }; - enum DKind { - InvalidDelta, // reserved for the default constructor - IndexDelta, // Delta is a constant from Index - BaseDelta, // Delta is a constant or variable from Base - StrideDelta, // Delta is a constant or variable from Stride - }; - Candidate() = default; Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, - Instruction *I, const SCEV *StrideSCEV) - : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I), - StrideSCEV(StrideSCEV) {} + Instruction *I) + : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {} Kind CandidateKind = Invalid; const SCEV *Base = nullptr; - // TODO: Swap Index and Stride's name. + // Note that Index and Stride of a GEP candidate do not necessarily have the // same integer type. In that case, during rewriting, Stride will be // sign-extended or truncated to Index's type. @@ -203,164 +177,22 @@ class StraightLineStrengthReduce { // Points to the immediate basis of this candidate, or nullptr if we cannot // find any basis for this candidate. 
Candidate *Basis = nullptr; - - DKind DeltaKind = InvalidDelta; - - // Store SCEV of Stride to compute delta from different strides - const SCEV *StrideSCEV = nullptr; - - // Points to (Y - X) that will be used to rewrite this candidate. - Value *Delta = nullptr; - - /// Cost model: Evaluate the computational efficiency of the candidate. - /// - /// Efficiency levels (higher is better): - /// ZeroInst (5) - [Variable] or [Const] - /// OneInstOneVar (4) - [Variable + Const] or [Variable * Const] - /// OneInstTwoVar (3) - [Variable + Variable] or [Variable * Variable] - /// TwoInstOneVar (2) - [Const + Const * Variable] - /// TwoInstTwoVar (1) - [Variable + Const * Variable] - enum EfficiencyLevel : unsigned { - Unknown = 0, - TwoInstTwoVar = 1, - TwoInstOneVar = 2, - OneInstTwoVar = 3, - OneInstOneVar = 4, - ZeroInst = 5 - }; - - static EfficiencyLevel - getComputationEfficiency(Kind CandidateKind, const ConstantInt *Index, - const Value *Stride, const SCEV *Base = nullptr) { - bool IsConstantBase = false; - bool IsZeroBase = false; - // When evaluating the efficiency of a rewrite, if the Base's SCEV is - // not available, conservatively assume the base is not constant. - if (auto *ConstBase = dyn_cast_or_null(Base)) { - IsConstantBase = true; - IsZeroBase = ConstBase->getValue()->isZero(); - } - - bool IsConstantStride = isa(Stride); - bool IsZeroStride = - IsConstantStride && cast(Stride)->isZero(); - // All constants - if (IsConstantBase && IsConstantStride) - return ZeroInst; - - // (Base + Index) * Stride - if (CandidateKind == Mul) { - if (IsZeroStride) - return ZeroInst; - if (Index->isZero()) - return (IsConstantStride || IsConstantBase) ? OneInstOneVar - : OneInstTwoVar; - - if (IsConstantBase) - return IsZeroBase && (Index->isOne() || Index->isMinusOne()) - ? ZeroInst - : OneInstOneVar; - - if (IsConstantStride) { - auto *CI = cast(Stride); - return (CI->isOne() || CI->isMinusOne()) ? 
OneInstOneVar - : TwoInstOneVar; - } - return TwoInstTwoVar; - } - - // Base + Index * Stride - assert(CandidateKind == Add || CandidateKind == GEP); - if (Index->isZero() || IsZeroStride) - return ZeroInst; - - bool IsSimpleIndex = Index->isOne() || Index->isMinusOne(); - - if (IsConstantBase) - return IsZeroBase ? (IsSimpleIndex ? ZeroInst : OneInstOneVar) - : (IsSimpleIndex ? OneInstOneVar : TwoInstOneVar); - - if (IsConstantStride) - return IsZeroStride ? ZeroInst : OneInstOneVar; - - if (IsSimpleIndex) - return OneInstTwoVar; - - return TwoInstTwoVar; - } - - // Evaluate if the given delta is profitable to rewrite this candidate. - bool isProfitableRewrite(const Value *Delta, const DKind DeltaKind) const { - // This function cannot accurately evaluate the profit of whole expression - // with context. A candidate (B + I * S) cannot express whether this - // instruction needs to compute on its own (I * S), which may be shared - // with other candidates or may need instructions to compute. - // If the rewritten form has the same strength, still rewrite to - // (X + Delta) since it may expose more CSE opportunities on Delta, as - // unrolled loops usually have identical Delta for each unrolled body. - // - // Note, this function should only be used on Index Delta rewrite. - // Base and Stride delta need context info to evaluate the register - // pressure impact from variable delta. - return getComputationEfficiency(CandidateKind, Index, Stride, Base) <= - getRewriteEfficiency(Delta, DeltaKind); - } - - // Evaluate the rewrite efficiency of this candidate with its Basis - EfficiencyLevel getRewriteEfficiency() const { - return Basis ? 
getRewriteEfficiency(Delta, DeltaKind) : Unknown; - } - - // Evaluate the rewrite efficiency of this candidate with a given delta - EfficiencyLevel getRewriteEfficiency(const Value *Delta, - const DKind DeltaKind) const { - switch (DeltaKind) { - case BaseDelta: // [X + Delta] - return getComputationEfficiency( - CandidateKind, - ConstantInt::get(cast(Delta->getType()), 1), Delta); - case StrideDelta: // [X + Index * Delta] - return getComputationEfficiency(CandidateKind, Index, Delta); - case IndexDelta: // [X + Delta * Stride] - return getComputationEfficiency(CandidateKind, cast(Delta), - Stride); - default: - return Unknown; - } - } - - bool isHighEfficiency() const { - return getComputationEfficiency(CandidateKind, Index, Stride, Base) >= - OneInstOneVar; - } - - // Verify that this candidate has valid delta components relative to the - // basis - bool hasValidDelta(const Candidate &Basis) const { - switch (DeltaKind) { - case IndexDelta: - // Index differs, Base and Stride must match - return Base == Basis.Base && StrideSCEV == Basis.StrideSCEV; - case StrideDelta: - // Stride differs, Base and Index must match - return Base == Basis.Base && Index == Basis.Index; - case BaseDelta: - // Base differs, Stride and Index must match - return StrideSCEV == Basis.StrideSCEV && Index == Basis.Index; - default: - return false; - } - } }; bool runOnFunction(Function &F); private: - // Fetch straight-line basis for rewriting C, update C.Basis to point to it, - // and store the delta between C and its Basis in C.Delta. - void setBasisAndDeltaFor(Candidate &C); + // Returns true if Basis is a basis for C, i.e., Basis dominates C and they + // share the same base and stride. + bool isBasisFor(const Candidate &Basis, const Candidate &C); + // Returns whether the candidate can be folded into an addressing mode. 
- bool isFoldable(const Candidate &C, TargetTransformInfo *TTI); + bool isFoldable(const Candidate &C, TargetTransformInfo *TTI, + const DataLayout *DL); + + // Returns true if C is already in a simplest form and not worth being + // rewritten. + bool isSimplestForm(const Candidate &C); // Checks whether I is in a candidate form. If so, adds all the matching forms // to Candidates, and tries to find the immediate basis for each of them. @@ -384,6 +216,12 @@ class StraightLineStrengthReduce { // Allocate candidates and find bases for GetElementPtr instructions. void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP); + // A helper function that scales Idx with ElementSize before invoking + // allocateCandidatesAndFindBasis. + void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx, + Value *S, uint64_t ElementSize, + Instruction *I); + // Adds the given form to Candidates, and finds its immediate // basis. void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B, @@ -391,7 +229,13 @@ class StraightLineStrengthReduce { Instruction *I); // Rewrites candidate C with respect to Basis. - void rewriteCandidate(const Candidate &C); + void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis); + + // A helper function that factors ArrayIdx to a product of a stride and a + // constant index, and invokes allocateCandidatesAndFindBasis with the + // factorings. + void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize, + GetElementPtrInst *GEP); // Emit code that computes the "bump" from Basis to C. static Value *emitBump(const Candidate &Basis, const Candidate &C, @@ -403,203 +247,12 @@ class StraightLineStrengthReduce { TargetTransformInfo *TTI = nullptr; std::list Candidates; - // Map from SCEV to instructions that represent the value, - // instructions are sorted in depth-first order. - DenseMap> SCEVToInsts; - - // Record the dependency between instructions. 
If C.Basis == B, we would have - // {B.Ins -> {C.Ins, ...}}. - MapVector> DependencyGraph; - - // Map between each instruction and its possible candidates. - DenseMap> RewriteCandidates; - - // All instructions that have candidates sort in topological order based on - // dependency graph, from roots to leaves. - std::vector SortedCandidateInsts; - - // Record all instructions that are already rewritten and will be removed - // later. - std::vector DeadInstructions; - - // Classify candidates against Delta kind - class CandidateDictTy { - public: - using CandsTy = SmallVector; - using BBToCandsTy = DenseMap; - - private: - // Index delta Basis must have the same (Base, StrideSCEV, Inst.Type) - using IndexDeltaKeyTy = std::tuple; - DenseMap IndexDeltaCandidates; - - // Base delta Basis must have the same (StrideSCEV, Index, Inst.Type) - using BaseDeltaKeyTy = std::tuple; - DenseMap BaseDeltaCandidates; - - // Stride delta Basis must have the same (Base, Index, Inst.Type) - using StrideDeltaKeyTy = std::tuple; - DenseMap StrideDeltaCandidates; - - public: - // TODO: Disable index delta on GEP after we completely move - // from typed GEP to PtrAdd. 
- const BBToCandsTy *getCandidatesWithDeltaKind(const Candidate &C, - Candidate::DKind K) const { - assert(K != Candidate::InvalidDelta); - if (K == Candidate::IndexDelta) { - IndexDeltaKeyTy IndexDeltaKey(C.Base, C.StrideSCEV, C.Ins->getType()); - auto It = IndexDeltaCandidates.find(IndexDeltaKey); - if (It != IndexDeltaCandidates.end()) - return &It->second; - } else if (K == Candidate::BaseDelta) { - BaseDeltaKeyTy BaseDeltaKey(C.StrideSCEV, C.Index, C.Ins->getType()); - auto It = BaseDeltaCandidates.find(BaseDeltaKey); - if (It != BaseDeltaCandidates.end()) - return &It->second; - } else { - assert(K == Candidate::StrideDelta); - StrideDeltaKeyTy StrideDeltaKey(C.Base, C.Index, C.Ins->getType()); - auto It = StrideDeltaCandidates.find(StrideDeltaKey); - if (It != StrideDeltaCandidates.end()) - return &It->second; - } - return nullptr; - } - - // Pointers to C must remain valid until CandidateDict is cleared. - void add(Candidate &C) { - Type *ValueType = C.Ins->getType(); - BasicBlock *BB = C.Ins->getParent(); - IndexDeltaKeyTy IndexDeltaKey(C.Base, C.StrideSCEV, ValueType); - BaseDeltaKeyTy BaseDeltaKey(C.StrideSCEV, C.Index, ValueType); - StrideDeltaKeyTy StrideDeltaKey(C.Base, C.Index, ValueType); - IndexDeltaCandidates[IndexDeltaKey][BB].push_back(&C); - BaseDeltaCandidates[BaseDeltaKey][BB].push_back(&C); - StrideDeltaCandidates[StrideDeltaKey][BB].push_back(&C); - } - // Remove all mappings from set - void clear() { - IndexDeltaCandidates.clear(); - BaseDeltaCandidates.clear(); - StrideDeltaCandidates.clear(); - } - } CandidateDict; - - const SCEV *getAndRecordSCEV(Value *V) { - auto *S = SE->getSCEV(V); - if (isa(V) && !(isa(S) || - isa(S) || isa(S))) - SCEVToInsts[S].insert(cast(V)); - - return S; - } - - // Get the nearest instruction before CI that represents the value of S, - // return nullptr if no instruction is associated with S or S is not a - // reusable expression. 
- Value *getNearestValueOfSCEV(const SCEV *S, const Instruction *CI) const { - if (isa(S)) - return nullptr; - - if (auto *SU = dyn_cast(S)) - return SU->getValue(); - if (auto *SC = dyn_cast(S)) - return SC->getValue(); - - auto It = SCEVToInsts.find(S); - if (It == SCEVToInsts.end()) - return nullptr; - - // Instructions are sorted in depth-first order, so search for the nearest - // instruction by walking the list in reverse order. - for (Instruction *I : reverse(It->second)) - if (DT->dominates(I, CI)) - return I; - - return nullptr; - } - - struct DeltaInfo { - Candidate *Cand; - Candidate::DKind DeltaKind; - Value *Delta; - - DeltaInfo() - : Cand(nullptr), DeltaKind(Candidate::InvalidDelta), Delta(nullptr) {} - DeltaInfo(Candidate *Cand, Candidate::DKind DeltaKind, Value *Delta) - : Cand(Cand), DeltaKind(DeltaKind), Delta(Delta) {} - operator bool() const { return Cand != nullptr; } - }; - - friend raw_ostream &operator<<(raw_ostream &OS, const DeltaInfo &DI); - - DeltaInfo compressPath(Candidate &C, Candidate *Basis) const; - - Candidate *pickRewriteCandidate(Instruction *I) const; - void sortCandidateInstructions(); - static Constant *getIndexDelta(Candidate &C, Candidate &Basis); - static bool isSimilar(Candidate &C, Candidate &Basis, Candidate::DKind K); - - // Add Basis -> C in DependencyGraph and propagate - // C.Stride and C.Delta's dependency to C - void addDependency(Candidate &C, Candidate *Basis) { - if (Basis) - DependencyGraph[Basis->Ins].emplace_back(C.Ins); - - // If any candidate of Inst has a basis, then Inst will be rewritten, - // C must be rewritten after rewriting Inst, so we need to propagate - // the dependency to C - auto PropagateDependency = [&](Instruction *Inst) { - if (auto CandsIt = RewriteCandidates.find(Inst); - CandsIt != RewriteCandidates.end() && - llvm::any_of(CandsIt->second, - [](Candidate *Cand) { return Cand->Basis; })) - DependencyGraph[Inst].emplace_back(C.Ins); - }; - - // If C has a variable delta and the delta is a 
candidate, - // propagate its dependency to C - if (auto *DeltaInst = dyn_cast_or_null(C.Delta)) - PropagateDependency(DeltaInst); - - // If the stride is a candidate, propagate its dependency to C - if (auto *StrideInst = dyn_cast(C.Stride)) - PropagateDependency(StrideInst); - }; + // Temporarily holds all instructions that are unlinked (but not deleted) by + // rewriteCandidateWithBasis. These instructions will be actually removed + // after all rewriting finishes. + std::vector UnlinkedInstructions; }; -inline raw_ostream &operator<<(raw_ostream &OS, - const StraightLineStrengthReduce::Candidate &C) { - OS << "Ins: " << *C.Ins << "\n Base: " << *C.Base - << "\n Index: " << *C.Index << "\n Stride: " << *C.Stride - << "\n StrideSCEV: " << *C.StrideSCEV; - if (C.Basis) - OS << "\n Delta: " << *C.Delta << "\n Basis: \n [ " << *C.Basis << " ]"; - return OS; -} - -[[maybe_unused]] LLVM_DUMP_METHOD inline raw_ostream & -operator<<(raw_ostream &OS, const StraightLineStrengthReduce::DeltaInfo &DI) { - OS << "Cand: " << *DI.Cand << "\n"; - OS << "Delta Kind: "; - switch (DI.DeltaKind) { - case StraightLineStrengthReduce::Candidate::IndexDelta: - OS << "Index"; - break; - case StraightLineStrengthReduce::Candidate::BaseDelta: - OS << "Base"; - break; - case StraightLineStrengthReduce::Candidate::StrideDelta: - OS << "Stride"; - break; - default: - break; - } - OS << "\nDelta: " << *DI.Delta; - return OS; -} - } // end anonymous namespace char StraightLineStrengthReduceLegacyPass::ID = 0; @@ -616,290 +269,17 @@ FunctionPass *llvm::createStraightLineStrengthReducePass() { return new StraightLineStrengthReduceLegacyPass(); } -// A helper function that unifies the bitwidth of A and B. 
-static void unifyBitWidth(APInt &A, APInt &B) { - if (A.getBitWidth() < B.getBitWidth()) - A = A.sext(B.getBitWidth()); - else if (A.getBitWidth() > B.getBitWidth()) - B = B.sext(A.getBitWidth()); -} - -Constant *StraightLineStrengthReduce::getIndexDelta(Candidate &C, - Candidate &Basis) { - APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue(); - unifyBitWidth(Idx, BasisIdx); - APInt IndexDelta = Idx - BasisIdx; - IntegerType *DeltaType = - IntegerType::get(C.Ins->getContext(), IndexDelta.getBitWidth()); - return ConstantInt::get(DeltaType, IndexDelta); -} - -bool StraightLineStrengthReduce::isSimilar(Candidate &C, Candidate &Basis, - Candidate::DKind K) { - bool SameType = false; - switch (K) { - case Candidate::StrideDelta: - SameType = C.StrideSCEV->getType() == Basis.StrideSCEV->getType(); - break; - case Candidate::BaseDelta: - SameType = C.Base->getType() == Basis.Base->getType(); - break; - case Candidate::IndexDelta: - SameType = true; - break; - default:; - } - return SameType && Basis.Ins != C.Ins && - Basis.CandidateKind == C.CandidateKind; -} - -void StraightLineStrengthReduce::setBasisAndDeltaFor(Candidate &C) { - auto SearchFrom = [this, &C](const CandidateDictTy::BBToCandsTy &BBToCands, - auto IsTarget) -> bool { - // Search dominating candidates by walking the immediate-dominator chain - // from the candidate's defining block upward. Visiting blocks in this - // order ensures we prefer the closest dominating basis. - const BasicBlock *BB = C.Ins->getParent(); - while (BB) { - auto It = BBToCands.find(BB); - if (It != BBToCands.end()) - for (Candidate *Basis : reverse(It->second)) - if (IsTarget(Basis)) - return true; - - const DomTreeNode *Node = DT->getNode(BB); - if (!Node) - break; - Node = Node->getIDom(); - BB = Node ? 
Node->getBlock() : nullptr; - } - return false; - }; - - // Priority: - // Constant Delta from Index > Constant Delta from Base > - // Constant Delta from Stride > Variable Delta from Base or Stride - // TODO: Change the priority to align with the cost model. - - // First, look for a constant index-diff basis - if (const auto *IndexDeltaCandidates = - CandidateDict.getCandidatesWithDeltaKind(C, Candidate::IndexDelta)) { - bool FoundConstDelta = - SearchFrom(*IndexDeltaCandidates, [&](Candidate *Basis) { - if (isSimilar(C, *Basis, Candidate::IndexDelta)) { - assert(DT->dominates(Basis->Ins, C.Ins)); - auto *Delta = getIndexDelta(C, *Basis); - if (!C.isProfitableRewrite(Delta, Candidate::IndexDelta)) - return false; - C.Basis = Basis; - C.DeltaKind = Candidate::IndexDelta; - C.Delta = Delta; - LLVM_DEBUG(dbgs() << "Found delta from Index " << *C.Delta << "\n"); - return true; - } - return false; - }); - if (FoundConstDelta) - return; - } - - // No constant-index-diff basis found. look for the best possible base-diff - // or stride-diff basis - // Base/Stride diffs not supported for form (B + i) * S - if (C.CandidateKind == Candidate::Mul) - return; - - auto For = [this, &C](Candidate::DKind K) { - // return true if find a Basis with constant delta and stop searching, - // return false if did not find a Basis or the delta is not a constant - // and continue searching for a Basis with constant delta - return [K, this, &C](Candidate *Basis) -> bool { - if (!isSimilar(C, *Basis, K)) - return false; - - assert(DT->dominates(Basis->Ins, C.Ins)); - const SCEV *BasisPart = - (K == Candidate::BaseDelta) ? Basis->Base : Basis->StrideSCEV; - const SCEV *CandPart = - (K == Candidate::BaseDelta) ? 
C.Base : C.StrideSCEV; - const SCEV *Diff = SE->getMinusSCEV(CandPart, BasisPart); - Value *AvailableVal = getNearestValueOfSCEV(Diff, C.Ins); - if (!AvailableVal) - return false; - - // Record delta if none has been found yet, or the new delta is - // a constant that is better than the existing delta. - if (!C.Delta || isa(AvailableVal)) { - C.Delta = AvailableVal; - C.Basis = Basis; - C.DeltaKind = K; - } - return isa(C.Delta); - }; - }; - - if (const auto *BaseDeltaCandidates = - CandidateDict.getCandidatesWithDeltaKind(C, Candidate::BaseDelta)) { - if (SearchFrom(*BaseDeltaCandidates, For(Candidate::BaseDelta))) { - LLVM_DEBUG(dbgs() << "Found delta from Base: " << *C.Delta << "\n"); - return; - } - } - - if (const auto *StrideDeltaCandidates = - CandidateDict.getCandidatesWithDeltaKind(C, Candidate::StrideDelta)) { - if (SearchFrom(*StrideDeltaCandidates, For(Candidate::StrideDelta))) { - LLVM_DEBUG(dbgs() << "Found delta from Stride: " << *C.Delta << "\n"); - return; - } - } - - // If we did not find a constant delta, we might have found a variable delta - if (C.Delta) { - LLVM_DEBUG({ - dbgs() << "Found delta from "; - if (C.DeltaKind == Candidate::BaseDelta) - dbgs() << "Base: "; - else - dbgs() << "Stride: "; - dbgs() << *C.Delta << "\n"; - }); - assert(C.DeltaKind != Candidate::InvalidDelta && C.Basis); - } -} - -// Compress the path from `Basis` to the deepest Basis in the Basis chain -// to avoid non-profitable data dependency and improve ILP. 
-// X = A + 1 -// Y = X + 1 -// Z = Y + 1 -// -> -// X = A + 1 -// Y = A + 2 -// Z = A + 3 -// Return the delta info for C aginst the new Basis -auto StraightLineStrengthReduce::compressPath(Candidate &C, - Candidate *Basis) const - -> DeltaInfo { - if (!Basis || !Basis->Basis || C.CandidateKind == Candidate::Mul) - return {}; - Candidate *Root = Basis; - Value *NewDelta = nullptr; - auto NewKind = Candidate::InvalidDelta; - - while (Root->Basis) { - Candidate *NextRoot = Root->Basis; - if (C.Base == NextRoot->Base && C.StrideSCEV == NextRoot->StrideSCEV && - isSimilar(C, *NextRoot, Candidate::IndexDelta)) { - ConstantInt *CI = cast(getIndexDelta(C, *NextRoot)); - if (CI->isZero() || CI->isOne() || isa(C.StrideSCEV)) { - Root = NextRoot; - NewKind = Candidate::IndexDelta; - NewDelta = CI; - continue; - } - } - - const SCEV *CandPart = nullptr; - const SCEV *BasisPart = nullptr; - auto CurrKind = Candidate::InvalidDelta; - if (C.Base == NextRoot->Base && C.Index == NextRoot->Index) { - CandPart = C.StrideSCEV; - BasisPart = NextRoot->StrideSCEV; - CurrKind = Candidate::StrideDelta; - } else if (C.StrideSCEV == NextRoot->StrideSCEV && - C.Index == NextRoot->Index) { - CandPart = C.Base; - BasisPart = NextRoot->Base; - CurrKind = Candidate::BaseDelta; - } else - break; - - assert(CandPart && BasisPart); - if (!isSimilar(C, *NextRoot, CurrKind)) - break; - - if (auto DeltaVal = - dyn_cast(SE->getMinusSCEV(CandPart, BasisPart))) { - Root = NextRoot; - NewDelta = DeltaVal->getValue(); - NewKind = CurrKind; - } else - break; - } - - if (Root != Basis) { - assert(NewKind != Candidate::InvalidDelta && NewDelta); - LLVM_DEBUG(dbgs() << "Found new Basis with " << *NewDelta - << " from path compression.\n"); - return {Root, NewKind, NewDelta}; - } - - return {}; -} - -// Topologically sort candidate instructions based on their relationship in -// dependency graph. 
-void StraightLineStrengthReduce::sortCandidateInstructions() { - SortedCandidateInsts.clear(); - // An instruction may have multiple candidates that get different Basis - // instructions, and each candidate can get dependencies from Basis and - // Stride when Stride will also be rewritten by SLSR. Hence, an instruction - // may have multiple dependencies. Use InDegree to ensure all dependencies - // processed before processing itself. - DenseMap InDegree; - for (auto &KV : DependencyGraph) { - InDegree.try_emplace(KV.first, 0); - - for (auto *Child : KV.second) { - InDegree[Child]++; - } - } - std::queue WorkList; - DenseSet Visited; - - for (auto &KV : DependencyGraph) - if (InDegree[KV.first] == 0) - WorkList.push(KV.first); - - while (!WorkList.empty()) { - Instruction *I = WorkList.front(); - WorkList.pop(); - if (!Visited.insert(I).second) - continue; - - SortedCandidateInsts.push_back(I); - - for (auto *Next : DependencyGraph[I]) { - auto &Degree = InDegree[Next]; - if (--Degree == 0) - WorkList.push(Next); - } - } - - assert(SortedCandidateInsts.size() == DependencyGraph.size() && - "Dependency graph should not have cycles"); -} - -auto StraightLineStrengthReduce::pickRewriteCandidate(Instruction *I) const - -> Candidate * { - // Return the candidate of instruction I that has the highest profit. - auto It = RewriteCandidates.find(I); - if (It == RewriteCandidates.end()) - return nullptr; - - Candidate *BestC = nullptr; - auto BestEfficiency = Candidate::Unknown; - for (Candidate *C : reverse(It->second)) - if (C->Basis) { - auto Efficiency = C->getRewriteEfficiency(); - if (Efficiency > BestEfficiency) { - BestEfficiency = Efficiency; - BestC = C; - } - } - - return BestC; +bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis, + const Candidate &C) { + return (Basis.Ins != C.Ins && // skip the same instruction + // They must have the same type too. Basis.Base == C.Base + // doesn't guarantee their types are the same (PR23975). 
+ Basis.Ins->getType() == C.Ins->getType() && + // Basis must dominate C in order to rewrite C with respect to Basis. + DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) && + // They share the same base, stride, and candidate kind. + Basis.Base == C.Base && Basis.Stride == C.Stride && + Basis.CandidateKind == C.CandidateKind); } static bool isGEPFoldable(GetElementPtrInst *GEP, @@ -919,7 +299,8 @@ static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride, } bool StraightLineStrengthReduce::isFoldable(const Candidate &C, - TargetTransformInfo *TTI) { + TargetTransformInfo *TTI, + const DataLayout *DL) { if (C.CandidateKind == Candidate::Add) return isAddFoldable(C.Base, C.Index, C.Stride, TTI); if (C.CandidateKind == Candidate::GEP) @@ -927,39 +308,75 @@ bool StraightLineStrengthReduce::isFoldable(const Candidate &C, return false; } +// Returns true if GEP has zero or one non-zero index. +static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) { + unsigned NumNonZeroIndices = 0; + for (Use &Idx : GEP->indices()) { + ConstantInt *ConstIdx = dyn_cast(Idx); + if (ConstIdx == nullptr || !ConstIdx->isZero()) + ++NumNonZeroIndices; + } + return NumNonZeroIndices <= 1; +} + +bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) { + if (C.CandidateKind == Candidate::Add) { + // B + 1 * S or B + (-1) * S + return C.Index->isOne() || C.Index->isMinusOne(); + } + if (C.CandidateKind == Candidate::Mul) { + // (B + 0) * S + return C.Index->isZero(); + } + if (C.CandidateKind == Candidate::GEP) { + // (char*)B + S or (char*)B - S + return ((C.Index->isOne() || C.Index->isMinusOne()) && + hasOnlyOneNonZeroIndex(cast(C.Ins))); + } + return false; +} + +// TODO: We currently implement an algorithm whose time complexity is linear in +// the number of existing candidates. However, we could do better by using +// ScopedHashTable. 
Specifically, while traversing the dominator tree, we could +// maintain all the candidates that dominate the basic block being traversed in +// a ScopedHashTable. This hash table is indexed by the base and the stride of +// a candidate. Therefore, finding the immediate basis of a candidate boils down +// to one hash-table look up. void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, Instruction *I) { - // Record the SCEV of S that we may use it as a variable delta. - // Ensure that we rewrite C with a existing IR that reproduces delta value. - - Candidate C(CT, B, Idx, S, I, getAndRecordSCEV(S)); - // If we can fold I into an addressing mode, computing I is likely free or - // takes only one instruction. So, we don't need to analyze or rewrite it. + Candidate C(CT, B, Idx, S, I); + // SLSR can complicate an instruction in two cases: // - // Currently, this algorithm can at best optimize complex computations into - // a `variable +/* constant` form. However, some targets have stricter - // constraints on the their addressing mode. - // For example, a `variable + constant` can only be folded to an addressing - // mode if the constant falls within a certain range. - // So, we also check if the instruction is already high efficient enough - // for the strength reduction algorithm. - if (!isFoldable(C, TTI) && !C.isHighEfficiency()) { - setBasisAndDeltaFor(C); - - // Compress unnecessary rewrite to improve ILP - if (auto Res = compressPath(C, C.Basis)) { - C.Basis = Res.Cand; - C.DeltaKind = Res.DeltaKind; - C.Delta = Res.Delta; + // 1. If we can fold I into an addressing mode, computing I is likely free or + // takes only one instruction. + // + // 2. I is already in a simplest form. For example, when + // X = B + 8 * S + // Y = B + S, + // rewriting Y to X - 7 * S is probably a bad idea. 
+ // + // In the above cases, we still add I to the candidate list so that I can be + // the basis of other candidates, but we leave I's basis blank so that I + // won't be rewritten. + if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) { + // Try to compute the immediate basis of C. + unsigned NumIterations = 0; + // Limit the scan radius to avoid running in quadratice time. + static const unsigned MaxNumIterations = 50; + for (auto Basis = Candidates.rbegin(); + Basis != Candidates.rend() && NumIterations < MaxNumIterations; + ++Basis, ++NumIterations) { + if (isBasisFor(*Basis, C)) { + C.Basis = &(*Basis); + break; + } } } // Regardless of whether we find a basis for C, we need to push C to the // candidate list so that it can be the basis of other candidates. - LLVM_DEBUG(dbgs() << "Allocated Candidate: " << C << "\n"); Candidates.push_back(C); - RewriteCandidates[C.Ins].push_back(&Candidates.back()); - CandidateDict.add(Candidates.back()); } void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( @@ -1058,6 +475,54 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul( } } +void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( + const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize, + Instruction *I) { + // I = B + sext(Idx *nsw S) * ElementSize + // = B + (sext(Idx) * sext(S)) * ElementSize + // = B + (sext(Idx) * ElementSize) * sext(S) + // Casting to IntegerType is safe because we skipped vector GEPs. + IntegerType *PtrIdxTy = cast(DL->getIndexType(I->getType())); + ConstantInt *ScaledIdx = ConstantInt::get( + PtrIdxTy, Idx->getSExtValue() * (int64_t)ElementSize, true); + allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I); +} + +void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx, + const SCEV *Base, + uint64_t ElementSize, + GetElementPtrInst *GEP) { + // At least, ArrayIdx = ArrayIdx *nsw 1. 
+ allocateCandidatesAndFindBasisForGEP( + Base, ConstantInt::get(cast(ArrayIdx->getType()), 1), + ArrayIdx, ElementSize, GEP); + Value *LHS = nullptr; + ConstantInt *RHS = nullptr; + // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx + // itself. This would allow us to handle the shl case for free. However, + // matching SCEVs has two issues: + // + // 1. this would complicate rewriting because the rewriting procedure + // would have to translate SCEVs back to IR instructions. This translation + // is difficult when LHS is further evaluated to a composite SCEV. + // + // 2. ScalarEvolution is designed to be control-flow oblivious. It tends + // to strip nsw/nuw flags which are critical for SLSR to trace into + // sext'ed multiplication. + if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) { + // SLSR is currently unsafe if i * S may overflow. + // GEP = Base + sext(LHS *nsw RHS) * ElementSize + allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP); + } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) { + // GEP = Base + sext(LHS <getBitWidth(), 1); + ConstantInt *PowerOf2 = + ConstantInt::get(RHS->getContext(), One << RHS->getValue()); + allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP); + } +} + void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( GetElementPtrInst *GEP) { // TODO: handle vector GEPs @@ -1081,14 +546,11 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( const SCEV *BaseExpr = SE->getGEPExpr(cast(GEP), IndexExprs); Value *ArrayIdx = GEP->getOperand(I); uint64_t ElementSize = GTI.getSequentialElementStride(*DL); - IntegerType *PtrIdxTy = cast(DL->getIndexType(GEP->getType())); - ConstantInt *ElementSizeIdx = ConstantInt::get(PtrIdxTy, ElementSize, true); if (ArrayIdx->getType()->getIntegerBitWidth() <= DL->getIndexSizeInBits(GEP->getAddressSpace())) { // Skip factoring if ArrayIdx is wider than the index size, 
because // ArrayIdx is implicitly truncated to the index size. - allocateCandidatesAndFindBasis(Candidate::GEP, BaseExpr, ElementSizeIdx, - ArrayIdx, GEP); + factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP); } // When ArrayIdx is the sext of a value, we try to factor that value as // well. Handling this case is important because array indices are @@ -1099,159 +561,118 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( DL->getIndexSizeInBits(GEP->getAddressSpace())) { // Skip factoring if TruncatedArrayIdx is wider than the pointer size, // because TruncatedArrayIdx is implicitly truncated to the pointer size. - allocateCandidatesAndFindBasis(Candidate::GEP, BaseExpr, ElementSizeIdx, - TruncatedArrayIdx, GEP); + factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP); } IndexExprs[I - 1] = OrigIndexExpr; } } +// A helper function that unifies the bitwidth of A and B. +static void unifyBitWidth(APInt &A, APInt &B) { + if (A.getBitWidth() < B.getBitWidth()) + A = A.sext(B.getBitWidth()); + else if (A.getBitWidth() > B.getBitWidth()) + B = B.sext(A.getBitWidth()); +} + Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis, const Candidate &C, IRBuilder<> &Builder, const DataLayout *DL) { - auto CreateMul = [&](Value *LHS, Value *RHS) { - if (ConstantInt *CR = dyn_cast(RHS)) { - const APInt &ConstRHS = CR->getValue(); - IntegerType *DeltaType = - IntegerType::get(C.Ins->getContext(), ConstRHS.getBitWidth()); - if (ConstRHS.isPowerOf2()) { - ConstantInt *Exponent = - ConstantInt::get(DeltaType, ConstRHS.logBase2()); - return Builder.CreateShl(LHS, Exponent); - } - if (ConstRHS.isNegatedPowerOf2()) { - ConstantInt *Exponent = - ConstantInt::get(DeltaType, (-ConstRHS).logBase2()); - return Builder.CreateNeg(Builder.CreateShl(LHS, Exponent)); - } - } - - return Builder.CreateMul(LHS, RHS); - }; - - Value *Delta = C.Delta; - // If Delta is 0, C is a fully redundant of C.Basis, - // just replace C.Ins with Basis.Ins - if 
(ConstantInt *CI = dyn_cast(Delta); - CI && CI->getValue().isZero()) - return nullptr; - - if (C.DeltaKind == Candidate::IndexDelta) { - APInt IndexDelta = cast(C.Delta)->getValue(); - // IndexDelta - // X = B + i * S - // Y = B + i` * S - // = B + (i + IndexDelta) * S - // = B + i * S + IndexDelta * S - // = X + IndexDelta * S - // Bump = (i' - i) * S - - // Common case 1: if (i' - i) is 1, Bump = S. - if (IndexDelta == 1) - return C.Stride; - // Common case 2: if (i' - i) is -1, Bump = -S. - if (IndexDelta.isAllOnes()) - return Builder.CreateNeg(C.Stride); - - IntegerType *DeltaType = - IntegerType::get(Basis.Ins->getContext(), IndexDelta.getBitWidth()); - Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType); - - return CreateMul(ExtendedStride, C.Delta); + APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue(); + unifyBitWidth(Idx, BasisIdx); + APInt IndexOffset = Idx - BasisIdx; + + // Compute Bump = C - Basis = (i' - i) * S. + // Common case 1: if (i' - i) is 1, Bump = S. + if (IndexOffset == 1) + return C.Stride; + // Common case 2: if (i' - i) is -1, Bump = -S. + if (IndexOffset.isAllOnes()) + return Builder.CreateNeg(C.Stride); + + // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may + // have different bit widths. + IntegerType *DeltaType = + IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth()); + Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType); + if (IndexOffset.isPowerOf2()) { + // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i). 
+ ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2()); + return Builder.CreateShl(ExtendedStride, Exponent); } - - assert(C.DeltaKind == Candidate::StrideDelta || - C.DeltaKind == Candidate::BaseDelta); - assert(C.CandidateKind != Candidate::Mul); - // StrideDelta - // X = B + i * S - // Y = B + i * S' - // = B + i * (S + StrideDelta) - // = B + i * S + i * StrideDelta - // = X + i * StrideDelta - // Bump = i * (S' - S) - // - // BaseDelta - // X = B + i * S - // Y = B' + i * S - // = (B + BaseDelta) + i * S - // = X + BaseDelta - // Bump = (B' - B). - Value *Bump = C.Delta; - if (C.DeltaKind == Candidate::StrideDelta) { - // If this value is consumed by a GEP, promote StrideDelta before doing - // StrideDelta * Index to ensure the same semantics as the original GEP. - if (C.CandidateKind == Candidate::GEP) { - auto *GEP = cast(C.Ins); - Type *NewScalarIndexTy = - DL->getIndexType(GEP->getPointerOperandType()->getScalarType()); - Bump = Builder.CreateSExtOrTrunc(Bump, NewScalarIndexTy); - } - if (!C.Index->isOne()) { - Value *ExtendedIndex = - Builder.CreateSExtOrTrunc(C.Index, Bump->getType()); - Bump = CreateMul(Bump, ExtendedIndex); - } + if (IndexOffset.isNegatedPowerOf2()) { + // If (i - i') is a power of 2, Bump = -sext/trunc(S) << log(i' - i). 
+ ConstantInt *Exponent = + ConstantInt::get(DeltaType, (-IndexOffset).logBase2()); + return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent)); } - return Bump; + Constant *Delta = ConstantInt::get(DeltaType, IndexOffset); + return Builder.CreateMul(ExtendedStride, Delta); } -void StraightLineStrengthReduce::rewriteCandidate(const Candidate &C) { +void StraightLineStrengthReduce::rewriteCandidateWithBasis( + const Candidate &C, const Candidate &Basis) { if (!DebugCounter::shouldExecute(StraightLineStrengthReduceCounter)) return; - const Candidate &Basis = *C.Basis; - assert(C.Delta && C.CandidateKind == Basis.CandidateKind && - C.hasValidDelta(Basis)); + assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base && + C.Stride == Basis.Stride); + // We run rewriteCandidateWithBasis on all candidates in a post-order, so the + // basis of a candidate cannot be unlinked before the candidate. + assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked"); + + // An instruction can correspond to multiple candidates. Therefore, instead of + // simply deleting an instruction when we rewrite it, we mark its parent as + // nullptr (i.e. unlink it) so that we can skip the candidates whose + // instruction is already rewritten. + if (!C.Ins->getParent()) + return; IRBuilder<> Builder(C.Ins); Value *Bump = emitBump(Basis, C, Builder, DL); Value *Reduced = nullptr; // equivalent to but weaker than C.Ins - // If delta is 0, C is a fully redundant of Basis, and Bump is nullptr, - // just replace C.Ins with Basis.Ins - if (!Bump) - Reduced = Basis.Ins; - else { - switch (C.CandidateKind) { - case Candidate::Add: - case Candidate::Mul: { - // C = Basis + Bump - Value *NegBump; - if (match(Bump, m_Neg(m_Value(NegBump)))) { - // If Bump is a neg instruction, emit C = Basis - (-Bump). - Reduced = Builder.CreateSub(Basis.Ins, NegBump); - // We only use the negative argument of Bump, and Bump itself may be - // trivially dead. 
- RecursivelyDeleteTriviallyDeadInstructions(Bump); - } else { - // It's tempting to preserve nsw on Bump and/or Reduced. However, it's - // usually unsound, e.g., - // - // X = (-2 +nsw 1) *nsw INT_MAX - // Y = (-2 +nsw 3) *nsw INT_MAX - // => - // Y = X + 2 * INT_MAX - // - // Neither + and * in the resultant expression are nsw. - Reduced = Builder.CreateAdd(Basis.Ins, Bump); - } - break; - } - case Candidate::GEP: { - bool InBounds = cast(C.Ins)->isInBounds(); - // C = (char *)Basis + Bump - Reduced = Builder.CreatePtrAdd(Basis.Ins, Bump, "", InBounds); - break; + switch (C.CandidateKind) { + case Candidate::Add: + case Candidate::Mul: { + // C = Basis + Bump + Value *NegBump; + if (match(Bump, m_Neg(m_Value(NegBump)))) { + // If Bump is a neg instruction, emit C = Basis - (-Bump). + Reduced = Builder.CreateSub(Basis.Ins, NegBump); + // We only use the negative argument of Bump, and Bump itself may be + // trivially dead. + RecursivelyDeleteTriviallyDeadInstructions(Bump); + } else { + // It's tempting to preserve nsw on Bump and/or Reduced. However, it's + // usually unsound, e.g., + // + // X = (-2 +nsw 1) *nsw INT_MAX + // Y = (-2 +nsw 3) *nsw INT_MAX + // => + // Y = X + 2 * INT_MAX + // + // Neither + and * in the resultant expression are nsw. + Reduced = Builder.CreateAdd(Basis.Ins, Bump); } - default: - llvm_unreachable("C.CandidateKind is invalid"); - }; - Reduced->takeName(C.Ins); + break; + } + case Candidate::GEP: { + bool InBounds = cast(C.Ins)->isInBounds(); + // C = (char *)Basis + Bump + Reduced = Builder.CreatePtrAdd(Basis.Ins, Bump, "", InBounds); + break; } + default: + llvm_unreachable("C.CandidateKind is invalid"); + }; + Reduced->takeName(C.Ins); C.Ins->replaceAllUsesWith(Reduced); - DeadInstructions.push_back(C.Ins); + // Unlink C.Ins so that we can skip other candidates also corresponding to + // C.Ins. The actual deletion is postponed to the end of runOnFunction. 
+ C.Ins->removeFromParent(); + UnlinkedInstructions.push_back(C.Ins); } bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) { @@ -1265,42 +686,33 @@ bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) { } bool StraightLineStrengthReduce::runOnFunction(Function &F) { - LLVM_DEBUG(dbgs() << "SLSR on Function: " << F.getName() << "\n"); // Traverse the dominator tree in the depth-first order. This order makes sure // all bases of a candidate are in Candidates when we process it. for (const auto Node : depth_first(DT)) for (auto &I : *(Node->getBlock())) allocateCandidatesAndFindBasis(&I); - // Build the dependency graph and sort candidate instructions from dependency - // roots to leaves - for (auto &C : Candidates) { - DependencyGraph.try_emplace(C.Ins); - addDependency(C, C.Basis); + // Rewrite candidates in the reverse depth-first order. This order makes sure + // a candidate being rewritten is not a basis for any other candidate. + while (!Candidates.empty()) { + const Candidate &C = Candidates.back(); + if (C.Basis != nullptr) { + rewriteCandidateWithBasis(C, *C.Basis); + } + Candidates.pop_back(); + } + + // Delete all unlink instructions. 
+ for (auto *UnlinkedInst : UnlinkedInstructions) { + for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) { + Value *Op = UnlinkedInst->getOperand(I); + UnlinkedInst->setOperand(I, nullptr); + RecursivelyDeleteTriviallyDeadInstructions(Op); + } + UnlinkedInst->deleteValue(); } - sortCandidateInstructions(); - - // Rewrite candidates in the topological order that rewrites a Candidate - // always before rewriting its Basis - for (Instruction *I : reverse(SortedCandidateInsts)) - if (Candidate *C = pickRewriteCandidate(I)) - rewriteCandidate(*C); - - for (auto *DeadIns : DeadInstructions) - // A dead instruction may be another dead instruction's op, - // don't delete an instruction twice - if (DeadIns->getParent()) - RecursivelyDeleteTriviallyDeadInstructions(DeadIns); - - bool Ret = !DeadInstructions.empty(); - DeadInstructions.clear(); - DependencyGraph.clear(); - RewriteCandidates.clear(); - SortedCandidateInsts.clear(); - // First clear all references to candidates in the list - CandidateDict.clear(); - // Then destroy the list - Candidates.clear(); + bool Ret = !UnlinkedInstructions.empty(); + UnlinkedInstructions.clear(); return Ret; } diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp index 11db0ec487328..076c5da4393fc 100644 --- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp +++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp @@ -92,6 +92,15 @@ emptyAndDetachBlock(BasicBlock *BB, "applying corresponding DTU updates."); } +static bool HasLoopOrEntryConvergenceToken(const BasicBlock *BB) { + for (const Instruction &I : *BB) { + const ConvergenceControlInst *CCI = dyn_cast(&I); + if (CCI && (CCI->isLoop() || CCI->isEntry())) + return true; + } + return false; +} + void llvm::detachDeadBlocks(ArrayRef BBs, SmallVectorImpl *Updates, bool KeepOneInputPHIs) { @@ -259,6 +268,13 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU, if 
(llvm::is_contained(PN.incoming_values(), &PN)) return false; + // Don't break if both the basic block and the predecessor contain loop or + // entry convergent intrinsics, since there may only be one convergence token + // per block. + if (HasLoopOrEntryConvergenceToken(BB) && + HasLoopOrEntryConvergenceToken(PredBB)) + return false; + LLVM_DEBUG(dbgs() << "Merging: " << BB->getName() << " into " << PredBB->getName() << "\n"); diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp index 5b94897f4342f..0f256398e5b1e 100644 --- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp @@ -1093,6 +1093,7 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, if (!RdxResult) { RdxResult = PartialReductions.front(); IRBuilder Builder(ExitBlock, ExitBlock->getFirstNonPHIIt()); + Builder.setFastMathFlags(Reductions.begin()->second.getFastMathFlags()); RecurKind RK = Reductions.begin()->second.getRecurrenceKind(); for (Instruction *RdxPart : drop_begin(PartialReductions)) { RdxResult = Builder.CreateBinOp( @@ -1253,16 +1254,19 @@ llvm::canParallelizeReductionWhenUnrolling(PHINode &Phi, Loop *L, /*DemandedBits=*/nullptr, /*AC=*/nullptr, /*DT=*/nullptr, SE)) return std::nullopt; + if (RdxDesc.hasUsesOutsideReductionChain()) + return std::nullopt; RecurKind RK = RdxDesc.getRecurrenceKind(); // Skip unsupported reductions. - // TODO: Handle additional reductions, including FP and min-max - // reductions. - if (!RecurrenceDescriptor::isIntegerRecurrenceKind(RK) || - RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) || + // TODO: Handle additional reductions, including min-max reductions. 
+ if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) || RecurrenceDescriptor::isFindIVRecurrenceKind(RK) || RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) return std::nullopt; + if (RdxDesc.hasExactFPMath()) + return std::nullopt; + if (RdxDesc.IntermediateStore) return std::nullopt; diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp index 9021d8b289baf..6e36006890df4 100644 --- a/llvm/lib/Transforms/Utils/ValueMapper.cpp +++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp @@ -526,8 +526,9 @@ Value *Mapper::mapValue(const Value *V) { if (isa(C)) return getVM()[V] = ConstantVector::get(Ops); if (isa(C)) - return getVM()[V] = ConstantPtrAuth::get(Ops[0], cast(Ops[1]), - cast(Ops[2]), Ops[3]); + return getVM()[V] = + ConstantPtrAuth::get(Ops[0], cast(Ops[1]), + cast(Ops[2]), Ops[3], Ops[4]); // If this is a no-operand constant, it must be because the type was remapped. if (isa(C)) return getVM()[V] = PoisonValue::get(NewTy); diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index 6d24c407eb5f4..c28314f6ab124 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -953,15 +953,15 @@ bool Vectorizer::vectorizeChain(Chain &C) { unsigned EOffset = (E.OffsetFromLeader - C[0].OffsetFromLeader).getZExtValue(); unsigned VecIdx = 8 * EOffset / DL.getTypeSizeInBits(VecElemTy); - if (auto *VT = dyn_cast(T)) { + if (!VecTy->isVectorTy()) { + V = VecInst; + } else if (auto *VT = dyn_cast(T)) { auto Mask = llvm::to_vector<8>( llvm::seq(VecIdx, VecIdx + VT->getNumElements())); V = Builder.CreateShuffleVector(VecInst, Mask, I->getName()); - } else if (VecTy != VecElemTy) { + } else { V = Builder.CreateExtractElement(VecInst, Builder.getInt32(VecIdx), I->getName()); - } else { - V = VecInst; } if (V->getType() != I->getType()) V = Builder.CreateBitOrPointerCast(V, I->getType()); diff --git 
a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index ba21bbbe112e6..f2e9c3146b0e8 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -877,6 +877,11 @@ bool LoopVectorizationLegality::canVectorizeInstr(Instruction &I) { Requirements->addExactFPMathInst(RedDes.getExactFPMathInst()); AllowedExit.insert(RedDes.getLoopExitInstr()); Reductions[Phi] = RedDes; + assert((!RedDes.hasUsesOutsideReductionChain() || + RecurrenceDescriptor::isMinMaxRecurrenceKind( + RedDes.getRecurrenceKind())) && + "Only min/max recurrences are allowed to have multiple uses " + "currently"); return true; } @@ -2095,24 +2100,6 @@ bool LoopVectorizationLegality::canFoldTailByMasking() const { for (const auto &Reduction : getReductionVars()) ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr()); - // TODO: handle non-reduction outside users when tail is folded by masking. - for (auto *AE : AllowedExit) { - // Check that all users of allowed exit values are inside the loop or - // are the live-out of a reduction. - if (ReductionLiveOuts.count(AE)) - continue; - for (User *U : AE->users()) { - Instruction *UI = cast(U); - if (TheLoop->contains(UI)) - continue; - LLVM_DEBUG( - dbgs() - << "LV: Cannot fold tail by masking, loop has an outside user for " - << *UI << "\n"); - return false; - } - } - for (const auto &Entry : getInductionVars()) { PHINode *OrigPhi = Entry.first; for (User *U : OrigPhi->users()) { diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index a63956c0cba6b..4a89f7dd8672e 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -5254,7 +5254,8 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, unsigned IID = I->getOpcode() == Instruction::Load ? 
Intrinsic::masked_load : Intrinsic::masked_store; - Cost += TTI.getMaskedMemoryOpCost({IID, VectorTy, Alignment, AS}, CostKind); + Cost += TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS), CostKind); } else { TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0)); Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, @@ -5313,10 +5314,14 @@ LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, if (!Legal->isUniform(Ptr, VF)) PtrTy = toVectorTy(PtrTy, VF); + unsigned IID = I->getOpcode() == Instruction::Load + ? Intrinsic::masked_gather + : Intrinsic::masked_scatter; return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) + - TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, - Legal->isMaskRequired(I), Alignment, - CostKind, I); + TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, VectorTy, Ptr, + Legal->isMaskRequired(I), Alignment, I), + CostKind); } InstructionCost @@ -6588,6 +6593,11 @@ void LoopVectorizationCostModel::collectInLoopReductions() { PHINode *Phi = Reduction.first; const RecurrenceDescriptor &RdxDesc = Reduction.second; + // Multi-use reductions (e.g., used in FindLastIV patterns) are handled + // separately and should not be considered for in-loop reductions. + if (RdxDesc.hasUsesOutsideReductionChain()) + continue; + // We don't collect reductions that are type promoted (yet). if (RdxDesc.getRecurrenceType() != Phi->getType()) continue; @@ -7021,10 +7031,11 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan, VPInstruction::FirstOrderRecurrenceSplice>()))) return true; } - // The VPlan-based cost model is more accurate for partial reduction and + // The VPlan-based cost model is more accurate for partial reductions and // comparing against the legacy cost isn't desirable. 
- if (isa(&R)) - return true; + if (auto *VPR = dyn_cast(&R)) + if (VPR->isPartialReduction()) + return true; // The VPlan-based cost model can analyze if recipes are scalar // recursively, but the legacy cost model cannot. @@ -7176,17 +7187,29 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, *CM.PSE.getSE(), OrigLoop); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); - // Verify that the VPlan-based and legacy cost models agree, except for VPlans - // with early exits and plans with additional VPlan simplifications. The - // legacy cost model doesn't properly model costs for such loops. - assert((BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() || - !Legal->getLAI()->getSymbolicStrides().empty() || - planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width), - CostCtx, OrigLoop, - BestFactor.Width) || - planContainsAdditionalSimplifications( - getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) && - " VPlan cost model and legacy cost model disagreed"); + // Verify that the VPlan-based and legacy cost models agree, except for + // * VPlans with early exits, + // * VPlans with additional VPlan simplifications, + // * EVL-based VPlans with gather/scatters (the VPlan-based cost model uses + // vp_scatter/vp_gather). + // The legacy cost model doesn't properly model costs for such loops. 
+ bool UsesEVLGatherScatter = + any_of(VPBlockUtils::blocksOnly(vp_depth_first_shallow( + BestPlan.getVectorLoopRegion()->getEntry())), + [](VPBasicBlock *VPBB) { + return any_of(*VPBB, [](VPRecipeBase &R) { + return isa(&R) && + !cast(&R)->isConsecutive(); + }); + }); + assert( + (BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() || + !Legal->getLAI()->getSymbolicStrides().empty() || UsesEVLGatherScatter || + planContainsAdditionalSimplifications( + getPlanFor(BestFactor.Width), CostCtx, OrigLoop, BestFactor.Width) || + planContainsAdditionalSimplifications( + getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) && + " VPlan cost model and legacy cost model disagreed"); assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) && "when vectorizing, the scalar cost must be computed."); #endif @@ -7992,9 +8015,10 @@ void VPRecipeBuilder::collectScaledReductions(VFRange &Range) { MapVector>> ChainsByPhi; - for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) - getScaledReductions(Phi, RdxDesc.getLoopExitInstr(), Range, - ChainsByPhi[Phi]); + for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) { + if (Instruction *RdxExitInstr = RdxDesc.getLoopExitInstr()) + getScaledReductions(Phi, RdxExitInstr, Range, ChainsByPhi[Phi]); + } // A partial reduction is invalid if any of its extends are used by // something that isn't another partial reduction. This is because the @@ -8207,11 +8231,16 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R, Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); // If the PHI is used by a partial reduction, set the scale factor. 
+ bool UseInLoopReduction = CM.isInLoopReduction(Phi); + bool UseOrderedReductions = CM.useOrderedReductions(RdxDesc); unsigned ScaleFactor = getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1); + PhiRecipe = new VPReductionPHIRecipe( - Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi), - CM.useOrderedReductions(RdxDesc), ScaleFactor); + Phi, RdxDesc.getRecurrenceKind(), *StartV, + getReductionStyle(UseInLoopReduction, UseOrderedReductions, + ScaleFactor), + RdxDesc.hasUsesOutsideReductionChain()); } else { // TODO: Currently fixed-order recurrences are modeled as chains of // first-order recurrences. If there are no users of the intermediate @@ -8280,16 +8309,18 @@ VPRecipeBuilder::tryToCreatePartialReduction(VPInstruction *Reduction, VPValue *BinOp = Reduction->getOperand(0); VPValue *Accumulator = Reduction->getOperand(1); - if (isa(BinOp) || isa(BinOp)) + VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe(); + if (isa(BinOpRecipe) || + (isa(BinOpRecipe) && + cast(BinOpRecipe)->isPartialReduction())) std::swap(BinOp, Accumulator); assert(ScaleFactor == vputils::getVFScaleFactor(Accumulator->getDefiningRecipe()) && "all accumulators in chain must have same scale factor"); - unsigned ReductionOpcode = Reduction->getOpcode(); auto *ReductionI = Reduction->getUnderlyingInstr(); - if (ReductionOpcode == Instruction::Sub) { + if (Reduction->getOpcode() == Instruction::Sub) { auto *const Zero = ConstantInt::get(ReductionI->getType(), 0); SmallVector Ops; Ops.push_back(Plan.getOrAddLiveIn(Zero)); @@ -8297,14 +8328,15 @@ VPRecipeBuilder::tryToCreatePartialReduction(VPInstruction *Reduction, BinOp = new VPWidenRecipe(*ReductionI, Ops, VPIRFlags(*ReductionI), VPIRMetadata(), ReductionI->getDebugLoc()); Builder.insert(BinOp->getDefiningRecipe()); - ReductionOpcode = Instruction::Add; } VPValue *Cond = nullptr; if (CM.blockNeedsPredicationForAnyReason(ReductionI->getParent())) Cond = getBlockInMask(Builder.getInsertBlock()); - return new 
VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond, - ScaleFactor, ReductionI); + + return new VPReductionRecipe( + RecurKind::Add, FastMathFlags(), ReductionI, Accumulator, BinOp, Cond, + RdxUnordered{/*VFScaleFactor=*/ScaleFactor}, ReductionI->getDebugLoc()); } void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, @@ -8336,6 +8368,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, if (auto Plan = tryToBuildVPlanWithVPRecipes( std::unique_ptr(VPlan0->duplicate()), SubRange, &LVer)) { // Now optimize the initial VPlan. + VPlanTransforms::hoistPredicatedLoads(*Plan, *PSE.getSE(), OrigLoop); VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths, *Plan, CM.getMinimalBitwidths()); VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan); @@ -8541,6 +8574,11 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( // Adjust the recipes for any inloop reductions. adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start); + // Apply mandatory transformation to handle reductions with multiple in-loop + // uses if possible, bail out otherwise. + if (!VPlanTransforms::runPass(VPlanTransforms::handleMultiUseReductions, + *Plan)) + return nullptr; // Apply mandatory transformation to handle FP maxnum/minnum reduction with // NaNs if possible, bail out otherwise. 
if (!VPlanTransforms::runPass(VPlanTransforms::handleMaxMinNumReductions, @@ -8794,9 +8832,10 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent())) CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent()); - auto *RedRecipe = new VPReductionRecipe( - Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp, - PhiR->isOrdered(), CurrentLinkI->getDebugLoc()); + ReductionStyle Style = getReductionStyle(true, PhiR->isOrdered(), 1); + auto *RedRecipe = + new VPReductionRecipe(Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, + CondOp, Style, CurrentLinkI->getDebugLoc()); // Append the recipe to the end of the VPBasicBlock because we need to // ensure that it comes after all of it's inputs, including CondOp. // Delete CurrentLink as it will be invalid if its operand is replaced @@ -8831,8 +8870,9 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( // Don't output selects for partial reductions because they have an output // with fewer lanes than the VF. So the operands of the select would have // different numbers of lanes. Partial reductions mask the input instead. 
+ auto *RR = dyn_cast(OrigExitingVPV->getDefiningRecipe()); if (!PhiR->isInLoop() && CM.foldTailByMasking() && - !isa(OrigExitingVPV)) { + (!RR || !RR->isPartialReduction())) { VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent()); std::optional FMFs = PhiTy->isFloatingPointTy() @@ -8929,7 +8969,8 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( if (FinalReductionResult == U || Parent->getParent()) continue; U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult); - if (match(U, m_ExtractLastElement(m_VPValue()))) + if (match(U, m_CombineOr(m_ExtractLastElement(m_VPValue()), + m_ExtractLane(m_VPValue(), m_VPValue())))) cast(U)->replaceAllUsesWith(FinalReductionResult); } diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 3b36ccbd677dc..0eb8ad8d3c93d 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -6900,10 +6900,11 @@ static bool isMaskedLoadCompress( ScalarLoadsCost; InstructionCost LoadCost = 0; if (IsMasked) { - LoadCost = TTI.getMaskedMemoryOpCost({Intrinsic::masked_load, LoadVecTy, - CommonAlignment, - LI->getPointerAddressSpace()}, - CostKind); + LoadCost = TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_load, LoadVecTy, + CommonAlignment, + LI->getPointerAddressSpace()), + CostKind); } else { LoadCost = TTI.getMemoryOpCost(Instruction::Load, LoadVecTy, CommonAlignment, @@ -7246,9 +7247,11 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( ScalarGEPCost; // The cost of masked gather. InstructionCost MaskedGatherCost = - TTI.getGatherScatterOpCost( - Instruction::Load, VecTy, cast(VL0)->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind) + + TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_gather, VecTy, + cast(VL0)->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind) + (ProfitableGatherPointers ? 
0 : VectorGEPCost); InstructionCost GatherCost = getScalarizationOverhead(TTI, ScalarTy, VecTy, DemandedElts, @@ -7355,26 +7358,30 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( VectorGEPCost; break; case LoadsState::StridedVectorize: - VecLdCost += TTI.getStridedMemoryOpCost(Instruction::Load, SubVecTy, - LI0->getPointerOperand(), - /*VariableMask=*/false, - CommonAlignment, CostKind) + + VecLdCost += TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes( + Intrinsic::experimental_vp_strided_load, + SubVecTy, LI0->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind) + VectorGEPCost; break; case LoadsState::CompressVectorize: - VecLdCost += TTI.getMaskedMemoryOpCost( - {Intrinsic::masked_load, SubVecTy, CommonAlignment, - LI0->getPointerAddressSpace()}, + VecLdCost += TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes( + Intrinsic::masked_load, SubVecTy, + CommonAlignment, LI0->getPointerAddressSpace()), CostKind) + - VectorGEPCost + ::getShuffleCost(TTI, TTI::SK_PermuteSingleSrc, SubVecTy, {}, CostKind); break; case LoadsState::ScatterVectorize: - VecLdCost += TTI.getGatherScatterOpCost(Instruction::Load, SubVecTy, - LI0->getPointerOperand(), - /*VariableMask=*/false, - CommonAlignment, CostKind) + + VecLdCost += TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes( + Intrinsic::masked_gather, SubVecTy, + LI0->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind) + VectorGEPCost; break; case LoadsState::Gather: @@ -13328,9 +13335,12 @@ void BoUpSLP::transformNodes() { BaseLI->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()) + ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind); - InstructionCost StridedCost = TTI->getStridedMemoryOpCost( - Instruction::Load, VecTy, BaseLI->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind, BaseLI); + InstructionCost StridedCost = TTI->getMemIntrinsicInstrCost( + 
MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load, + VecTy, BaseLI->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment, + BaseLI), + CostKind); if (StridedCost < OriginalVecCost || ForceStridedLoads) { // Strided load is more profitable than consecutive load + reverse - // transform the node to strided load. @@ -13363,9 +13373,12 @@ void BoUpSLP::transformNodes() { BaseSI->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()) + ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind); - InstructionCost StridedCost = TTI->getStridedMemoryOpCost( - Instruction::Store, VecTy, BaseSI->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind, BaseSI); + InstructionCost StridedCost = TTI->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_store, + VecTy, BaseSI->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment, + BaseSI), + CostKind); if (StridedCost < OriginalVecCost) // Strided store is more profitable than reverse + consecutive store - // transform the node to strided store. 
@@ -15131,9 +15144,11 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals, assert(StridedLoadTy && "Missing StridedPoinerInfo for tree entry."); Align CommonAlignment = computeCommonAlignment(UniqueValues.getArrayRef()); - VecLdCost = TTI->getStridedMemoryOpCost( - Instruction::Load, StridedLoadTy, LI0->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind); + VecLdCost = TTI->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load, + StridedLoadTy, LI0->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind); if (StridedLoadTy != VecTy) VecLdCost += TTI->getCastInstrCost(Instruction::BitCast, VecTy, StridedLoadTy, @@ -15168,9 +15183,10 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals, Instruction::Load, LoadVecTy, InterleaveFactor, {}, CommonAlignment, LI0->getPointerAddressSpace(), CostKind); } else if (IsMasked) { - VecLdCost = TTI->getMaskedMemoryOpCost( - {Intrinsic::masked_load, LoadVecTy, CommonAlignment, - LI0->getPointerAddressSpace()}, + VecLdCost = TTI->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_load, LoadVecTy, + CommonAlignment, + LI0->getPointerAddressSpace()), CostKind); // TODO: include this cost into CommonCost. 
VecLdCost += ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, @@ -15188,9 +15204,11 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals, case TreeEntry::ScatterVectorize: { Align CommonAlignment = computeCommonAlignment(UniqueValues.getArrayRef()); - VecLdCost = TTI->getGatherScatterOpCost( - Instruction::Load, VecTy, LI0->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind); + VecLdCost = TTI->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_gather, VecTy, + LI0->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind); break; } case TreeEntry::CombinedVectorize: @@ -15230,9 +15248,11 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals, if (E->State == TreeEntry::StridedVectorize) { Align CommonAlignment = computeCommonAlignment(UniqueValues.getArrayRef()); - VecStCost = TTI->getStridedMemoryOpCost( - Instruction::Store, VecTy, BaseSI->getPointerOperand(), - /*VariableMask=*/false, CommonAlignment, CostKind); + VecStCost = TTI->getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_store, + VecTy, BaseSI->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment), + CostKind); } else { assert(E->State == TreeEntry::Vectorize && "Expected either strided or consecutive stores."); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 0c7d9c0193a03..6ca750fc53279 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -44,6 +44,7 @@ #include #include #include +#include namespace llvm { @@ -566,7 +567,6 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue { case VPRecipeBase::VPWidenIntOrFpInductionSC: case VPRecipeBase::VPWidenPointerInductionSC: case VPRecipeBase::VPReductionPHISC: - case VPRecipeBase::VPPartialReductionSC: return true; case VPRecipeBase::VPBranchOnMaskSC: case VPRecipeBase::VPInterleaveEVLSC: @@ -1099,6 +1099,13 @@ 
class LLVM_ABI_FOR_TEST VPInstruction : public VPRecipeWithIRFlags, // Implemented with @llvm.experimental.cttz.elts, but returns the expected // result even with operands that are all zeroes. FirstActiveLane, + // Calculates the last active lane index of the vector predicate operands. + // The predicates must be prefix-masks (all 1s before all 0s). Used when + // tail-folding to extract the correct live-out value from the last active + // iteration. It produces the lane index across all unrolled iterations. + // Unrolling will add all copies of its original operand as additional + // operands. + LastActiveLane, // The opcodes below are used for VPInstructionWithType. // @@ -2064,6 +2071,9 @@ class LLVM_ABI_FOR_TEST VPHeaderPHIRecipe : public VPSingleDefRecipe, static inline bool classof(const VPValue *V) { return isa(V->getDefiningRecipe()); } + static inline bool classof(const VPSingleDefRecipe *R) { + return isa(static_cast(R)); + } /// Generate the phi nodes. void execute(VPTransformState &State) override = 0; @@ -2129,7 +2139,7 @@ class VPWidenInductionRecipe : public VPHeaderPHIRecipe { return R && classof(R); } - static inline bool classof(const VPHeaderPHIRecipe *R) { + static inline bool classof(const VPSingleDefRecipe *R) { return classof(static_cast(R)); } @@ -2392,6 +2402,29 @@ struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe { #endif }; +/// Possible variants of a reduction. + +/// This reduction is ordered and in-loop. +struct RdxOrdered {}; +/// This reduction is in-loop. +struct RdxInLoop {}; +/// This reduction is unordered with the partial result scaled down by some +/// factor. 
+struct RdxUnordered { + unsigned VFScaleFactor; +}; +using ReductionStyle = std::variant; + +inline ReductionStyle getReductionStyle(bool InLoop, bool Ordered, + unsigned ScaleFactor) { + assert((!Ordered || InLoop) && "Ordered implies in-loop"); + if (Ordered) + return RdxOrdered{}; + if (InLoop) + return RdxInLoop{}; + return RdxUnordered{/*VFScaleFactor=*/ScaleFactor}; +} + /// A recipe for handling reduction phis. The start value is the first operand /// of the recipe and the incoming value from the backedge is the second /// operand. @@ -2400,32 +2433,29 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, /// The recurrence kind of the reduction. const RecurKind Kind; - /// The phi is part of an in-loop reduction. - bool IsInLoop; - - /// The phi is part of an ordered reduction. Requires IsInLoop to be true. - bool IsOrdered; + ReductionStyle Style; - /// When expanding the reduction PHI, the plan's VF element count is divided - /// by this factor to form the reduction phi's VF. - unsigned VFScaleFactor = 1; + /// The phi is part of a multi-use reduction (e.g., used in FindLastIV + /// patterns for argmin/argmax). + /// TODO: Also support cases where the phi itself has a single use, but its + /// compare has multiple uses. + bool HasUsesOutsideReductionChain; public: /// Create a new VPReductionPHIRecipe for the reduction \p Phi. 
VPReductionPHIRecipe(PHINode *Phi, RecurKind Kind, VPValue &Start, - bool IsInLoop = false, bool IsOrdered = false, - unsigned VFScaleFactor = 1) + ReductionStyle Style, + bool HasUsesOutsideReductionChain = false) : VPHeaderPHIRecipe(VPDef::VPReductionPHISC, Phi, &Start), Kind(Kind), - IsInLoop(IsInLoop), IsOrdered(IsOrdered), VFScaleFactor(VFScaleFactor) { - assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop"); - } + Style(Style), + HasUsesOutsideReductionChain(HasUsesOutsideReductionChain) {} ~VPReductionPHIRecipe() override = default; VPReductionPHIRecipe *clone() override { auto *R = new VPReductionPHIRecipe( dyn_cast_or_null(getUnderlyingValue()), getRecurrenceKind(), - *getOperand(0), IsInLoop, IsOrdered, VFScaleFactor); + *getOperand(0), Style, HasUsesOutsideReductionChain); R->addOperand(getBackedgeValue()); return R; } @@ -2435,8 +2465,12 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, /// Generate the phi/select nodes. void execute(VPTransformState &State) override; - /// Get the factor that the VF of this recipe's output should be scaled by. - unsigned getVFScaleFactor() const { return VFScaleFactor; } + /// Get the factor that the VF of this recipe's output should be scaled by, or + /// 1 if it isn't scaled. + unsigned getVFScaleFactor() const { + auto *Partial = std::get_if(&Style); + return Partial ? Partial->VFScaleFactor : 1; + } /// Returns the number of incoming values, also number of incoming blocks. /// Note that at the moment, VPWidenPointerInductionRecipe only has a single @@ -2447,10 +2481,21 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, RecurKind getRecurrenceKind() const { return Kind; } /// Returns true, if the phi is part of an ordered reduction. - bool isOrdered() const { return IsOrdered; } + bool isOrdered() const { return std::holds_alternative(Style); } - /// Returns true, if the phi is part of an in-loop reduction. 
- bool isInLoop() const { return IsInLoop; } + /// Returns true if the phi is part of an in-loop reduction. + bool isInLoop() const { + return std::holds_alternative(Style) || + std::holds_alternative(Style); + } + + /// Returns true if the reduction outputs a vector with a scaled down VF. + bool isPartialReduction() const { return getVFScaleFactor() > 1; } + + /// Returns true, if the phi is part of a multi-use reduction. + bool hasUsesOutsideReductionChain() const { + return HasUsesOutsideReductionChain; + } /// Returns true if the recipe only uses the first lane of operand \p Op. bool usesFirstLaneOnly(const VPValue *Op) const override { @@ -2732,23 +2777,25 @@ class LLVM_ABI_FOR_TEST VPInterleaveEVLRecipe final : public VPInterleaveBase { #endif }; -/// A recipe to represent inloop reduction operations, performing a reduction on -/// a vector operand into a scalar value, and adding the result to a chain. -/// The Operands are {ChainOp, VecOp, [Condition]}. +/// A recipe to represent inloop, ordered or partial reduction operations. It +/// performs a reduction on a vector operand into a scalar (vector in the case +/// of a partial reduction) value, and adds the result to a chain. The Operands +/// are {ChainOp, VecOp, [Condition]}. class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { + /// The recurrence kind for the reduction in question. RecurKind RdxKind; - bool IsOrdered; /// Whether the reduction is conditional. 
bool IsConditional = false; + ReductionStyle Style; protected: VPReductionRecipe(const unsigned char SC, RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, ArrayRef Operands, VPValue *CondOp, - bool IsOrdered, DebugLoc DL) + ReductionStyle Style, DebugLoc DL) : VPRecipeWithIRFlags(SC, Operands, FMFs, DL), RdxKind(RdxKind), - IsOrdered(IsOrdered) { + Style(Style) { if (CondOp) { IsConditional = true; addOperand(CondOp); @@ -2759,30 +2806,29 @@ class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { public: VPReductionRecipe(RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, - bool IsOrdered, DebugLoc DL = DebugLoc::getUnknown()) + ReductionStyle Style, DebugLoc DL = DebugLoc::getUnknown()) : VPReductionRecipe(VPDef::VPReductionSC, RdxKind, FMFs, I, - ArrayRef({ChainOp, VecOp}), CondOp, - IsOrdered, DL) {} + ArrayRef({ChainOp, VecOp}), CondOp, Style, + DL) {} VPReductionRecipe(const RecurKind RdxKind, FastMathFlags FMFs, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, - bool IsOrdered, DebugLoc DL = DebugLoc::getUnknown()) + ReductionStyle Style, DebugLoc DL = DebugLoc::getUnknown()) : VPReductionRecipe(VPDef::VPReductionSC, RdxKind, FMFs, nullptr, - ArrayRef({ChainOp, VecOp}), CondOp, - IsOrdered, DL) {} + ArrayRef({ChainOp, VecOp}), CondOp, Style, + DL) {} ~VPReductionRecipe() override = default; VPReductionRecipe *clone() override { return new VPReductionRecipe(RdxKind, getFastMathFlags(), getUnderlyingInstr(), getChainOp(), getVecOp(), - getCondOp(), IsOrdered, getDebugLoc()); + getCondOp(), Style, getDebugLoc()); } static inline bool classof(const VPRecipeBase *R) { return R->getVPDefID() == VPRecipeBase::VPReductionSC || - R->getVPDefID() == VPRecipeBase::VPReductionEVLSC || - R->getVPDefID() == VPRecipeBase::VPPartialReductionSC; + R->getVPDefID() == VPRecipeBase::VPReductionEVLSC; } static inline bool classof(const VPUser *U) { @@ -2809,9 +2855,16 @@ class LLVM_ABI_FOR_TEST 
VPReductionRecipe : public VPRecipeWithIRFlags { /// Return the recurrence kind for the in-loop reduction. RecurKind getRecurrenceKind() const { return RdxKind; } /// Return true if the in-loop reduction is ordered. - bool isOrdered() const { return IsOrdered; }; + bool isOrdered() const { return std::holds_alternative(Style); }; /// Return true if the in-loop reduction is conditional. bool isConditional() const { return IsConditional; }; + /// Returns true if the reduction outputs a vector with a scaled down VF. + bool isPartialReduction() const { return getVFScaleFactor() > 1; } + /// Returns true if the reduction is in-loop. + bool isInLoop() const { + return std::holds_alternative(Style) || + std::holds_alternative(Style); + } /// The VPValue of the scalar Chain being accumulated. VPValue *getChainOp() const { return getOperand(0); } /// The VPValue of the vector value to be reduced. @@ -2820,69 +2873,12 @@ class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { VPValue *getCondOp() const { return isConditional() ? getOperand(getNumOperands() - 1) : nullptr; } - -protected: -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - /// Print the recipe. - void printRecipe(raw_ostream &O, const Twine &Indent, - VPSlotTracker &SlotTracker) const override; -#endif -}; - -/// A recipe for forming partial reductions. In the loop, an accumulator and -/// vector operand are added together and passed to the next iteration as the -/// next accumulator. After the loop body, the accumulator is reduced to a -/// scalar value. -class VPPartialReductionRecipe : public VPReductionRecipe { - unsigned Opcode; - - /// The divisor by which the VF of this recipe's output should be divided - /// during execution. 
- unsigned VFScaleFactor; - -public: - VPPartialReductionRecipe(Instruction *ReductionInst, VPValue *Op0, - VPValue *Op1, VPValue *Cond, unsigned VFScaleFactor) - : VPPartialReductionRecipe(ReductionInst->getOpcode(), Op0, Op1, Cond, - VFScaleFactor, ReductionInst) {} - VPPartialReductionRecipe(unsigned Opcode, VPValue *Op0, VPValue *Op1, - VPValue *Cond, unsigned ScaleFactor, - Instruction *ReductionInst = nullptr) - : VPReductionRecipe(VPDef::VPPartialReductionSC, RecurKind::Add, - FastMathFlags(), ReductionInst, - ArrayRef({Op0, Op1}), Cond, false, {}), - Opcode(Opcode), VFScaleFactor(ScaleFactor) { - [[maybe_unused]] auto *AccumulatorRecipe = - getChainOp()->getDefiningRecipe(); - // When cloning as part of a VPExpressionRecipe the chain op could have - // replaced by a temporary VPValue, so it doesn't have a defining recipe. - assert((!AccumulatorRecipe || - isa(AccumulatorRecipe) || - isa(AccumulatorRecipe)) && - "Unexpected operand order for partial reduction recipe"); - } - ~VPPartialReductionRecipe() override = default; - - VPPartialReductionRecipe *clone() override { - return new VPPartialReductionRecipe(Opcode, getOperand(0), getOperand(1), - getCondOp(), VFScaleFactor, - getUnderlyingInstr()); - } - - VP_CLASSOF_IMPL(VPDef::VPPartialReductionSC) - - /// Generate the reduction in the loop. - void execute(VPTransformState &State) override; - - /// Return the cost of this VPPartialReductionRecipe. - InstructionCost computeCost(ElementCount VF, - VPCostContext &Ctx) const override; - - /// Get the binary op's opcode. - unsigned getOpcode() const { return Opcode; } - - /// Get the factor that the VF of this recipe's output should be scaled by. - unsigned getVFScaleFactor() const { return VFScaleFactor; } + /// Get the factor that the VF of this recipe's output should be scaled by, or + /// 1 if it isn't scaled. + unsigned getVFScaleFactor() const { + auto *Partial = std::get_if(&Style); + return Partial ? 
Partial->VFScaleFactor : 1; + } protected: #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -2905,7 +2901,7 @@ class LLVM_ABI_FOR_TEST VPReductionEVLRecipe : public VPReductionRecipe { R.getFastMathFlags(), cast_or_null(R.getUnderlyingValue()), ArrayRef({R.getChainOp(), R.getVecOp(), &EVL}), CondOp, - R.isOrdered(), DL) {} + getReductionStyle(/*InLoop=*/true, R.isOrdered(), 1), DL) {} ~VPReductionEVLRecipe() override = default; @@ -3173,7 +3169,7 @@ class VPExpressionRecipe : public VPSingleDefRecipe { void decompose(); unsigned getVFScaleFactor() const { - auto *PR = dyn_cast(ExpressionRecipes.back()); + auto *PR = dyn_cast(ExpressionRecipes.back()); return PR ? PR->getVFScaleFactor() : 1; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 80a2e4bc3f754..ea38a8b16ebc7 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -11,6 +11,7 @@ #include "VPlanCFG.h" #include "VPlanDominatorTree.h" #include "VPlanHelpers.h" +#include "VPlanPatternMatch.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Analysis/ScalarEvolution.h" @@ -19,6 +20,7 @@ #include "llvm/IR/PatternMatch.h" using namespace llvm; +using namespace VPlanPatternMatch; #define DEBUG_TYPE "vplan" @@ -115,6 +117,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case VPInstruction::ExtractLane: return inferScalarType(R->getOperand(1)); case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: return Type::getIntNTy(Ctx, 64); case VPInstruction::ExtractLastElement: case VPInstruction::ExtractLastLanePerPart: @@ -288,10 +291,10 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) { [](const auto *R) { return R->getScalarType(); }) .Case([this](const VPRecipeBase *R) { - return inferScalarType(R->getOperand(0)); - }) + VPVectorEndPointerRecipe, VPWidenCanonicalIVRecipe>( + [this](const 
VPRecipeBase *R) { + return inferScalarType(R->getOperand(0)); + }) // VPInstructionWithType must be handled before VPInstruction. .Case( @@ -326,8 +329,7 @@ void llvm::collectEphemeralRecipesForVPlan( vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) { for (VPRecipeBase &R : *VPBB) { auto *RepR = dyn_cast(&R); - if (!RepR || !match(RepR->getUnderlyingInstr(), - PatternMatch::m_Intrinsic())) + if (!RepR || !match(RepR, m_Intrinsic())) continue; Worklist.push_back(RepR); EphRecipes.insert(RepR); @@ -561,11 +563,12 @@ SmallVector llvm::calculateRegisterUsageForPlan( // fewer lanes than the VF. unsigned ScaleFactor = vputils::getVFScaleFactor(VPV->getDefiningRecipe()); - ElementCount VF = VFs[J].divideCoefficientBy(ScaleFactor); - LLVM_DEBUG(if (VF != VFs[J]) { - dbgs() << "LV(REG): Scaled down VF from " << VFs[J] << " to " << VF - << " for " << *R << "\n"; - }); + ElementCount VF = VFs[J]; + if (ScaleFactor > 1) { + VF = VFs[J].divideCoefficientBy(ScaleFactor); + LLVM_DEBUG(dbgs() << "LV(REG): Scaled down VF from " << VFs[J] + << " to " << VF << " for " << *R << "\n";); + } Type *ScalarTy = TypeInfo.inferScalarType(VPV); unsigned ClassID = TTI.getRegisterClassForType(true, ScalarTy); diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp index 5fbd61a929fe2..329b62cee4fce 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp @@ -22,6 +22,7 @@ #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/MDBuilder.h" +#include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/LoopVersioning.h" #define DEBUG_TYPE "vplan" @@ -827,15 +828,18 @@ void VPlanTransforms::addMinimumVectorEpilogueIterationCheck( Branch->setMetadata(LLVMContext::MD_prof, BranchWeights); } -/// If \p RedPhiR is used by a ComputeReductionResult recipe, return it. -/// Otherwise return nullptr. 
-static VPInstruction * -findComputeReductionResult(VPReductionPHIRecipe *RedPhiR) { - auto It = find_if(RedPhiR->users(), [](VPUser *U) { - auto *VPI = dyn_cast(U); - return VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult; - }); - return It == RedPhiR->user_end() ? nullptr : cast(*It); +/// If \p V is used by a recipe matching pattern \p P, return it. Otherwise +/// return nullptr; +template +static VPRecipeBase *findUserOf(VPValue *V, const MatchT &P) { + auto It = find_if(V->users(), match_fn(P)); + return It == V->user_end() ? nullptr : cast(*It); +} + +/// If \p V is used by a VPInstruction with \p Opcode, return it. Otherwise +/// return nullptr. +template static VPInstruction *findUserOf(VPValue *V) { + return cast_or_null(findUserOf(V, m_VPInstruction())); } bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) { @@ -845,23 +849,13 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) { if (!MinMaxR) return nullptr; - auto *RepR = dyn_cast(MinMaxR); - if (!isa(MinMaxR) && - !(RepR && isa(RepR->getUnderlyingInstr()))) + // Check that MinMaxR is a VPWidenIntrinsicRecipe or VPReplicateRecipe + // with an intrinsic that matches the reduction kind. + Intrinsic::ID ExpectedIntrinsicID = + getMinMaxReductionIntrinsicOp(RedPhiR->getRecurrenceKind()); + if (!match(MinMaxR, m_Intrinsic(ExpectedIntrinsicID))) return nullptr; -#ifndef NDEBUG - Intrinsic::ID RdxIntrinsicId = - RedPhiR->getRecurrenceKind() == RecurKind::FMaxNum ? 
Intrinsic::maxnum - : Intrinsic::minnum; - assert(((isa(MinMaxR) && - cast(MinMaxR)->getVectorIntrinsicID() == - RdxIntrinsicId) || - (RepR && cast(RepR->getUnderlyingInstr()) - ->getIntrinsicID() == RdxIntrinsicId)) && - "Intrinsic did not match recurrence kind"); -#endif - if (MinMaxR->getOperand(0) == RedPhiR) return MinMaxR->getOperand(1); @@ -942,7 +936,8 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) { // If we exit early due to NaNs, compute the final reduction result based on // the reduction phi at the beginning of the last vector iteration. - auto *RdxResult = findComputeReductionResult(RedPhiR); + auto *RdxResult = + findUserOf(RedPhiR); auto *NewSel = MiddleBuilder.createSelect(AnyNaNLane, RedPhiR, RdxResult->getOperand(1)); @@ -1001,3 +996,155 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) { MiddleTerm->setOperand(0, NewCond); return true; } + +bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan) { + for (auto &PhiR : make_early_inc_range( + Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis())) { + auto *MinMaxPhiR = dyn_cast(&PhiR); + // TODO: check for multi-uses in VPlan directly. + if (!MinMaxPhiR || !MinMaxPhiR->hasUsesOutsideReductionChain()) + continue; + + // MinMaxPhiR has users outside the reduction cycle in the loop. Check if + // the only other user is a FindLastIV reduction. MinMaxPhiR must have + // exactly 3 users: 1) the min/max operation, the compare of a FindLastIV + // reduction and ComputeReductionResult. The comparisom must compare + // MinMaxPhiR against the min/max operand used for the min/max reduction + // and only be used by the select of the FindLastIV reduction. 
+ RecurKind RdxKind = MinMaxPhiR->getRecurrenceKind(); + assert( + RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind) && + "only min/max recurrences support users outside the reduction chain"); + + auto *MinMaxOp = + dyn_cast(MinMaxPhiR->getBackedgeValue()); + if (!MinMaxOp) + return false; + + // Check that MinMaxOp is a VPWidenIntrinsicRecipe or VPReplicateRecipe + // with an intrinsic that matches the reduction kind. + Intrinsic::ID ExpectedIntrinsicID = getMinMaxReductionIntrinsicOp(RdxKind); + if (!match(MinMaxOp, m_Intrinsic(ExpectedIntrinsicID))) + return false; + + // MinMaxOp must have 2 users: 1) MinMaxPhiR and 2) ComputeReductionResult + // (asserted below). + assert(MinMaxOp->getNumUsers() == 2 && + "MinMaxOp must have exactly 2 users"); + VPValue *MinMaxOpValue = MinMaxOp->getOperand(0); + if (MinMaxOpValue == MinMaxPhiR) + MinMaxOpValue = MinMaxOp->getOperand(1); + + VPValue *CmpOpA; + VPValue *CmpOpB; + CmpPredicate Pred; + auto *Cmp = dyn_cast_or_null(findUserOf( + MinMaxPhiR, m_Cmp(Pred, m_VPValue(CmpOpA), m_VPValue(CmpOpB)))); + if (!Cmp || Cmp->getNumUsers() != 1 || + (CmpOpA != MinMaxOpValue && CmpOpB != MinMaxOpValue)) + return false; + + if (MinMaxOpValue != CmpOpB) + Pred = CmpInst::getSwappedPredicate(Pred); + + // MinMaxPhiR must have exactly 3 users: + // * MinMaxOp, + // * Cmp (that's part of a FindLastIV chain), + // * ComputeReductionResult. + if (MinMaxPhiR->getNumUsers() != 3) + return false; + + VPInstruction *MinMaxResult = + findUserOf(MinMaxPhiR); + assert(is_contained(MinMaxPhiR->users(), MinMaxOp) && + "one user must be MinMaxOp"); + assert(MinMaxResult && "MinMaxResult must be a user of MinMaxPhiR"); + assert(is_contained(MinMaxOp->users(), MinMaxResult) && + "MinMaxResult must be a user of MinMaxOp (and of MinMaxPhiR"); + + // Cmp must be used by the select of a FindLastIV chain. 
+ VPValue *Sel = dyn_cast(Cmp->getSingleUser()); + VPValue *IVOp, *FindIV; + if (!Sel || Sel->getNumUsers() != 2 || + !match(Sel, + m_Select(m_Specific(Cmp), m_VPValue(IVOp), m_VPValue(FindIV)))) + return false; + + if (!isa(FindIV)) { + std::swap(FindIV, IVOp); + Pred = CmpInst::getInversePredicate(Pred); + } + + auto *FindIVPhiR = dyn_cast(FindIV); + if (!FindIVPhiR || !RecurrenceDescriptor::isFindLastIVRecurrenceKind( + FindIVPhiR->getRecurrenceKind())) + return false; + + // TODO: Support cases where IVOp is the IV increment. + if (!match(IVOp, m_TruncOrSelf(m_VPValue(IVOp))) || + !isa(IVOp)) + return false; + + CmpInst::Predicate RdxPredicate = [RdxKind]() { + switch (RdxKind) { + case RecurKind::UMin: + return CmpInst::ICMP_UGE; + case RecurKind::UMax: + return CmpInst::ICMP_ULE; + case RecurKind::SMax: + return CmpInst::ICMP_SLE; + case RecurKind::SMin: + return CmpInst::ICMP_SGE; + default: + llvm_unreachable("unhandled recurrence kind"); + } + }(); + + // TODO: Strict predicates need to find the first IV value for which the + // predicate holds, not the last. + if (Pred != RdxPredicate) + return false; + + assert(!FindIVPhiR->isInLoop() && !FindIVPhiR->isOrdered() && + "cannot handle inloop/ordered reductions yet"); + + // The reduction using MinMaxPhiR needs adjusting to compute the correct + // result: + // 1. We need to find the last IV for which the condition based on the + // min/max recurrence is true, + // 2. Compare the partial min/max reduction result to its final value and, + // 3. Select the lanes of the partial FindLastIV reductions which + // correspond to the lanes matching the min/max reduction result. 
+ // + // For example, this transforms + // vp<%min.result> = compute-reduction-result ir<%min.val>, + // ir<%min.val.next> + // vp<%find.iv.result = compute-find-iv-result ir<%min.idx>, ir<0>, + // SENTINEL, vp<%min.idx.next> + // + // into: + // + // vp = compute-reduction-result ir<%min.val>, ir<%min.val.next> + // vp<%final.min.cmp> = icmp eq ir<%min.val.next>, vp + // vp<%final.iv> = select vp<%final.min.cmp>, ir<%min.idx.next>, SENTINEL + // vp<%find.iv.result> = compute-find-iv-result ir<%min.idx>, ir<0>, + // SENTINEL, vp<%final.iv> + VPInstruction *FindIVResult = + findUserOf(FindIVPhiR); + assert(FindIVResult->getParent() == MinMaxResult->getParent() && + "both results must be computed in the same block"); + MinMaxResult->moveBefore(*FindIVResult->getParent(), + FindIVResult->getIterator()); + + VPBuilder B(FindIVResult); + VPValue *MinMaxExiting = MinMaxResult->getOperand(1); + auto *FinalMinMaxCmp = + B.createICmp(CmpInst::ICMP_EQ, MinMaxExiting, MinMaxResult); + VPValue *Sentinel = FindIVResult->getOperand(2); + VPValue *LastIVExiting = FindIVResult->getOperand(3); + auto *FinalIVSelect = + B.createSelect(FinalMinMaxCmp, LastIVExiting, Sentinel); + FindIVResult->setOperand(3, FinalIVSelect); + } + return true; +} diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h index 91a392cccc1e3..750ef8edd94bb 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h +++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h @@ -398,12 +398,24 @@ m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) { return m_VPInstruction(Op0, Op1); } +template +inline VPInstruction_match +m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) { + return m_VPInstruction(Op0, Op1); +} + template inline VPInstruction_match m_ExtractLastLanePerPart(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_ExtractPenultimateElement(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + 
template inline VPInstruction_match m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) { @@ -436,6 +448,16 @@ m_FirstActiveLane(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_LastActiveLane(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + +inline VPInstruction_match m_StepVector() { + return m_VPInstruction(); +} + template inline AllRecipe_match m_Unary(const Op0_t &Op0) { return AllRecipe_match(Op0); @@ -446,6 +468,12 @@ inline AllRecipe_match m_Trunc(const Op0_t &Op0) { return m_Unary(Op0); } +template +inline match_combine_or, Op0_t> +m_TruncOrSelf(const Op0_t &Op0) { + return m_CombineOr(m_Trunc(Op0), Op0); +} + template inline AllRecipe_match m_ZExt(const Op0_t &Op0) { return m_Unary(Op0); @@ -834,6 +862,11 @@ template inline IntrinsicID_match m_Intrinsic() { return IntrinsicID_match(IntrID); } +/// Match intrinsic calls with a runtime intrinsic ID. +inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) { + return IntrinsicID_match(IntrID); +} + template inline typename m_Intrinsic_Ty::Ty m_Intrinsic(const T0 &Op0) { return m_CombineAnd(m_Intrinsic(), m_Argument<0>(Op0)); diff --git a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp index fb17d5dd62b9d..3579af21d8b07 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp @@ -44,11 +44,6 @@ class VPPredicator { /// possibly inserting new recipes at \p Dst (using Builder's insertion point) VPValue *createEdgeMask(VPBasicBlock *Src, VPBasicBlock *Dst); - /// Returns the *entry* mask for \p VPBB. - VPValue *getBlockInMask(VPBasicBlock *VPBB) const { - return BlockMaskCache.lookup(VPBB); - } - /// Record \p Mask as the *entry* mask of \p VPBB, which is expected to not /// already have a mask. 
void setBlockInMask(VPBasicBlock *VPBB, VPValue *Mask) { @@ -68,6 +63,11 @@ class VPPredicator { } public: + /// Returns the *entry* mask for \p VPBB. + VPValue *getBlockInMask(VPBasicBlock *VPBB) const { + return BlockMaskCache.lookup(VPBB); + } + /// Returns the precomputed predicate of the edge from \p Src to \p Dst. VPValue *getEdgeMask(const VPBasicBlock *Src, const VPBasicBlock *Dst) const { return EdgeMaskCache.lookup({Src, Dst}); @@ -301,5 +301,34 @@ VPlanTransforms::introduceMasksAndLinearize(VPlan &Plan, bool FoldTail) { PrevVPBB = VPBB; } + + // If we folded the tail and introduced a header mask, any extract of the + // last element must be updated to extract from the last active lane of the + // header mask instead (i.e., the lane corresponding to the last active + // iteration). + if (FoldTail) { + assert(Plan.getExitBlocks().size() == 1 && + "only a single-exit block is supported currently"); + VPBasicBlock *EB = Plan.getExitBlocks().front(); + assert(EB->getSinglePredecessor() == Plan.getMiddleBlock() && + "the exit block must have middle block as single predecessor"); + + VPBuilder B(Plan.getMiddleBlock()->getTerminator()); + for (auto &P : EB->phis()) { + auto *ExitIRI = cast(&P); + VPValue *Inc = ExitIRI->getIncomingValue(0); + VPValue *Op; + if (!match(Inc, m_ExtractLastElement(m_VPValue(Op)))) + continue; + + // Compute the index of the last active lane. 
+ VPValue *HeaderMask = Predicator.getBlockInMask(Header); + VPValue *LastActiveLane = + B.createNaryOp(VPInstruction::LastActiveLane, HeaderMask); + auto *Ext = + B.createNaryOp(VPInstruction::ExtractLane, {LastActiveLane, Op}); + Inc->replaceAllUsesWith(Ext); + } + } return Predicator.getBlockMaskCache(); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index e41f67103e096..0baf7172e4443 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -180,7 +180,6 @@ bool VPRecipeBase::mayHaveSideEffects() const { return cast(this)->mayHaveSideEffects(); case VPBlendSC: case VPReductionEVLSC: - case VPPartialReductionSC: case VPReductionSC: case VPScalarIVStepsSC: case VPVectorPointerSC: @@ -314,134 +313,6 @@ bool VPRecipeBase::isScalarCast() const { return VPI && Instruction::isCast(VPI->getOpcode()); } -InstructionCost -VPPartialReductionRecipe::computeCost(ElementCount VF, - VPCostContext &Ctx) const { - std::optional Opcode; - VPValue *Op = getVecOp(); - uint64_t MulConst; - - InstructionCost CondCost = 0; - if (isConditional()) { - CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; - auto *VecTy = Ctx.Types.inferScalarType(Op); - auto *CondTy = Ctx.Types.inferScalarType(getCondOp()); - CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, - Pred, Ctx.CostKind); - } - - // If the partial reduction is predicated, a select will be operand 1. - // If it isn't predicated and the mul isn't operating on a constant, then it - // should have been turned into a VPExpressionRecipe. - // FIXME: Replace the entire function with this once all partial reduction - // variants are bundled into VPExpressionRecipe. 
- if (!match(Op, m_Mul(m_VPValue(), m_ConstantInt(MulConst)))) { - auto *PhiType = Ctx.Types.inferScalarType(getChainOp()); - auto *InputType = Ctx.Types.inferScalarType(getVecOp()); - return CondCost + Ctx.TTI.getPartialReductionCost( - getOpcode(), InputType, InputType, PhiType, VF, - TTI::PR_None, TTI::PR_None, {}, Ctx.CostKind); - } - - VPRecipeBase *OpR = Op->getDefiningRecipe(); - Type *InputTypeA = nullptr, *InputTypeB = nullptr; - TTI::PartialReductionExtendKind ExtAType = TTI::PR_None, - ExtBType = TTI::PR_None; - - auto GetExtendKind = [](VPRecipeBase *R) { - if (!R) - return TTI::PR_None; - auto *WidenCastR = dyn_cast(R); - if (!WidenCastR) - return TTI::PR_None; - if (WidenCastR->getOpcode() == Instruction::CastOps::ZExt) - return TTI::PR_ZeroExtend; - if (WidenCastR->getOpcode() == Instruction::CastOps::SExt) - return TTI::PR_SignExtend; - return TTI::PR_None; - }; - - // Pick out opcode, type/ext information and use sub side effects from a widen - // recipe. - auto HandleWiden = [&](VPWidenRecipe *Widen) { - if (match(Widen, m_Sub(m_ZeroInt(), m_VPValue(Op)))) { - Widen = dyn_cast(Op); - } - Opcode = Widen->getOpcode(); - VPRecipeBase *ExtAR = Widen->getOperand(0)->getDefiningRecipe(); - VPRecipeBase *ExtBR = Widen->getOperand(1)->getDefiningRecipe(); - InputTypeA = Ctx.Types.inferScalarType(ExtAR ? ExtAR->getOperand(0) - : Widen->getOperand(0)); - InputTypeB = Ctx.Types.inferScalarType(ExtBR ? 
ExtBR->getOperand(0) - : Widen->getOperand(1)); - ExtAType = GetExtendKind(ExtAR); - ExtBType = GetExtendKind(ExtBR); - - using namespace VPlanPatternMatch; - const APInt *C; - if (!ExtBR && match(Widen->getOperand(1), m_APInt(C)) && - canConstantBeExtended(C, InputTypeA, ExtAType)) { - InputTypeB = InputTypeA; - ExtBType = ExtAType; - } - }; - - if (isa(OpR)) { - InputTypeA = Ctx.Types.inferScalarType(OpR->getOperand(0)); - ExtAType = GetExtendKind(OpR); - } else if (isa(OpR)) { - if (auto RedPhiOp1R = dyn_cast_or_null(getOperand(1))) { - InputTypeA = Ctx.Types.inferScalarType(RedPhiOp1R->getOperand(0)); - ExtAType = GetExtendKind(RedPhiOp1R); - } else if (auto Widen = dyn_cast_or_null(getOperand(1))) - HandleWiden(Widen); - } else if (auto Widen = dyn_cast(OpR)) { - HandleWiden(Widen); - } else if (auto Reduction = dyn_cast(OpR)) { - return CondCost + Reduction->computeCost(VF, Ctx); - } - auto *PhiType = Ctx.Types.inferScalarType(getOperand(1)); - return CondCost + Ctx.TTI.getPartialReductionCost( - getOpcode(), InputTypeA, InputTypeB, PhiType, VF, - ExtAType, ExtBType, Opcode, Ctx.CostKind); - ; -} - -void VPPartialReductionRecipe::execute(VPTransformState &State) { - auto &Builder = State.Builder; - - assert(getOpcode() == Instruction::Add && - "Unhandled partial reduction opcode"); - - Value *BinOpVal = State.get(getVecOp()); - Value *PhiVal = State.get(getChainOp()); - assert(PhiVal && BinOpVal && "Phi and Mul must be set"); - - Type *RetTy = PhiVal->getType(); - - if (isConditional()) { - Value *Cond = State.get(getCondOp()); - Value *Zero = ConstantInt::get(BinOpVal->getType(), 0); - BinOpVal = Builder.CreateSelect(Cond, BinOpVal, Zero); - } - - CallInst *V = - Builder.CreateIntrinsic(RetTy, Intrinsic::vector_partial_reduce_add, - {PhiVal, BinOpVal}, nullptr, "partial.reduce"); - - State.set(this, V); -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -void VPPartialReductionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, - VPSlotTracker 
&SlotTracker) const { - O << Indent << "PARTIAL-REDUCE "; - printAsOperand(O, SlotTracker); - O << " = " << Instruction::getOpcodeName(getOpcode()) << " "; - printOperands(O, SlotTracker); -} -#endif - void VPIRFlags::intersectFlags(const VPIRFlags &Other) { assert(OpType == Other.OpType && "OpType must match"); switch (OpType) { @@ -569,7 +440,6 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) { case VPInstruction::ExtractLastElement: case VPInstruction::ExtractLastLanePerPart: case VPInstruction::ExtractPenultimateElement: - case VPInstruction::FirstActiveLane: case VPInstruction::Not: case VPInstruction::ResumeForEpilogue: case VPInstruction::Unpack: @@ -599,6 +469,8 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) { case Instruction::PHI: case Instruction::Switch: case VPInstruction::AnyOf: + case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::SLPLoad: case VPInstruction::SLPStore: // Cannot determine the number of operands from the opcode. @@ -1184,6 +1056,29 @@ InstructionCost VPInstruction::computeCost(ElementCount VF, {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); } + case VPInstruction::LastActiveLane: { + Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0)); + if (VF.isScalar()) + return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy, + CmpInst::makeCmpResultType(ScalarTy), + CmpInst::ICMP_EQ, Ctx.CostKind); + // Calculate the cost of determining the lane index: NOT + cttz_elts + SUB. + auto *PredTy = toVectorTy(ScalarTy, VF); + IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts, + Type::getInt64Ty(Ctx.LLVMCtx), + {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); + InstructionCost Cost = Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); + // Add cost of NOT operation on the predicate. 
+ Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Xor, PredTy, Ctx.CostKind, + {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None}, + {TargetTransformInfo::OK_UniformConstantValue, + TargetTransformInfo::OP_None}); + // Add cost of SUB operation on the index. + Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Sub, Type::getInt64Ty(Ctx.LLVMCtx), Ctx.CostKind); + return Cost; + } case VPInstruction::FirstOrderRecurrenceSplice: { assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?"); SmallVector Mask(VF.getKnownMinValue()); @@ -1238,6 +1133,7 @@ bool VPInstruction::isVectorToScalar() const { getOpcode() == Instruction::ExtractElement || getOpcode() == VPInstruction::ExtractLane || getOpcode() == VPInstruction::FirstActiveLane || + getOpcode() == VPInstruction::LastActiveLane || getOpcode() == VPInstruction::ComputeAnyOfResult || getOpcode() == VPInstruction::ComputeFindIVResult || getOpcode() == VPInstruction::ComputeReductionResult || @@ -1305,6 +1201,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case VPInstruction::ActiveLaneMask: case VPInstruction::ExplicitVectorLength: case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::FirstOrderRecurrenceSplice: case VPInstruction::LogicalAnd: case VPInstruction::Not: @@ -1481,6 +1378,9 @@ void VPInstruction::printRecipe(raw_ostream &O, const Twine &Indent, case VPInstruction::FirstActiveLane: O << "first-active-lane"; break; + case VPInstruction::LastActiveLane: + O << "last-active-lane"; + break; case VPInstruction::ReductionStartVector: O << "reduction-start-vector"; break; @@ -2593,22 +2493,11 @@ void VPWidenGEPRecipe::printRecipe(raw_ostream &O, const Twine &Indent, } #endif -static Type *getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride, - unsigned CurrentPart, IRBuilderBase &Builder) { - // Use i32 for the gep index type when the value is constant, - // or query DataLayout for a more suitable index type 
otherwise. - const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); - return !IsUnitStride || (IsScalable && (IsReverse || CurrentPart > 0)) - ? DL.getIndexType(Builder.getPtrTy(0)) - : Builder.getInt32Ty(); -} - void VPVectorEndPointerRecipe::execute(VPTransformState &State) { auto &Builder = State.Builder; unsigned CurrentPart = getUnrollPart(*this); - bool IsUnitStride = Stride == 1 || Stride == -1; - Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ true, - IsUnitStride, CurrentPart, Builder); + const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); + Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this)); // The wide store needs to start at the last vector element. Value *RunTimeVF = State.get(getVFValue(), VPLane(0)); @@ -2644,8 +2533,8 @@ void VPVectorEndPointerRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPVectorPointerRecipe::execute(VPTransformState &State) { auto &Builder = State.Builder; unsigned CurrentPart = getUnrollPart(*this); - Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false, - /*IsUnitStride*/ true, CurrentPart, Builder); + const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); + Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this)); Value *Ptr = State.get(getOperand(0), VPLane(0)); Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart); @@ -2706,7 +2595,6 @@ void VPBlendRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPReductionRecipe::execute(VPTransformState &State) { assert(!State.Lane && "Reduction being replicated."); - Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); RecurKind Kind = getRecurrenceKind(); assert(!RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) && "In-loop AnyOf reductions aren't currently supported"); @@ -2728,7 +2616,8 @@ void VPReductionRecipe::execute(VPTransformState &State) { } Value *NewRed; Value *NextInChain; - if (IsOrdered) { + if 
(isOrdered()) { + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); if (State.VF.isVector()) NewRed = createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain); @@ -2738,8 +2627,18 @@ void VPReductionRecipe::execute(VPTransformState &State) { PrevInChain, NewVecOp); PrevInChain = NewRed; NextInChain = NewRed; + } else if (isPartialReduction()) { + assert(Kind == RecurKind::Add && "Unexpected partial reduction kind"); + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ false); + NewRed = State.Builder.CreateIntrinsic( + PrevInChain->getType(), Intrinsic::vector_partial_reduce_add, + {PrevInChain, NewVecOp}, nullptr, "partial.reduce"); + PrevInChain = NewRed; + NextInChain = NewRed; } else { - PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); + assert(isInLoop() && + "The reduction must either be ordered, partial or in-loop"); + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind); if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain); @@ -2748,7 +2647,7 @@ void VPReductionRecipe::execute(VPTransformState &State) { (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), PrevInChain, NewRed); } - State.set(this, NextInChain, /*IsScalar*/ true); + State.set(this, NextInChain, /*IsScalar*/ !isPartialReduction()); } void VPReductionEVLRecipe::execute(VPTransformState &State) { @@ -2795,6 +2694,22 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF, std::optional OptionalFMF = ElementTy->isFloatingPointTy() ? 
std::make_optional(FMFs) : std::nullopt; + if (isPartialReduction()) { + InstructionCost CondCost = 0; + if (isConditional()) { + CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; + auto *CondTy = cast( + toVectorTy(Ctx.Types.inferScalarType(getCondOp()), VF)); + CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy, + CondTy, Pred, Ctx.CostKind); + } + return CondCost + Ctx.TTI.getPartialReductionCost( + Opcode, ElementTy, ElementTy, ElementTy, VF, + TargetTransformInfo::PR_None, + TargetTransformInfo::PR_None, std::nullopt, + Ctx.CostKind); + } + // TODO: Support any-of reductions. assert( (!RecurrenceDescriptor::isAnyOfRecurrenceKind(RdxKind) || @@ -2900,7 +2815,9 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF, unsigned Opcode = RecurrenceDescriptor::getOpcode( cast(ExpressionRecipes[1])->getRecurrenceKind()); auto *ExtR = cast(ExpressionRecipes[0]); - return isa(ExpressionRecipes.back()) + + return cast(ExpressionRecipes.back()) + ->isPartialReduction() ? Ctx.TTI.getPartialReductionCost( Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr, RedTy, VF, @@ -2920,7 +2837,8 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF, Opcode = Instruction::Sub; [[fallthrough]]; case ExpressionTypes::ExtMulAccReduction: { - if (isa(ExpressionRecipes.back())) { + auto *RedR = cast(ExpressionRecipes.back()); + if (RedR->isPartialReduction()) { auto *Ext0R = cast(ExpressionRecipes[0]); auto *Ext1R = cast(ExpressionRecipes[1]); auto *Mul = cast(ExpressionRecipes[2]); @@ -2959,8 +2877,8 @@ bool VPExpressionRecipe::mayHaveSideEffects() const { bool VPExpressionRecipe::isSingleScalar() const { // Cannot use vputils::isSingleScalar(), because all external operands // of the expression will be live-ins while bundled. 
- return isa(ExpressionRecipes.back()) && - !isa(ExpressionRecipes.back()); + auto *RR = dyn_cast(ExpressionRecipes.back()); + return RR && !RR->isPartialReduction(); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -2972,12 +2890,11 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, O << " = "; auto *Red = cast(ExpressionRecipes.back()); unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()); - bool IsPartialReduction = isa(Red); switch (ExpressionType) { case ExpressionTypes::ExtendedReduction: { getOperand(1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce."; O << Instruction::getOpcodeName(Opcode) << " ("; getOperand(0)->printAsOperand(O, SlotTracker); Red->printFlags(O); @@ -2994,7 +2911,7 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, } case ExpressionTypes::ExtNegatedMulAccReduction: { getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce."; O << Instruction::getOpcodeName( RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind())) << " (sub (0, mul"; @@ -3019,7 +2936,7 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, case ExpressionTypes::MulAccReduction: case ExpressionTypes::ExtMulAccReduction: { getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." 
: "") << "reduce."; O << Instruction::getOpcodeName( RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind())) << " ("; @@ -3056,7 +2973,10 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPReductionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const { - O << Indent << "REDUCE "; + if (isPartialReduction()) + O << Indent << "PARTIAL-REDUCE "; + else + O << Indent << "REDUCE "; printAsOperand(O, SlotTracker); O << " = "; getChainOp()->printAsOperand(O, SlotTracker); @@ -3580,18 +3500,24 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF, if (!vputils::isSingleScalar(getAddr())) PtrTy = toVectorTy(PtrTy, VF); + unsigned IID = isa(this) ? Intrinsic::masked_gather + : isa(this) ? Intrinsic::masked_scatter + : isa(this) ? Intrinsic::vp_gather + : Intrinsic::vp_scatter; return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, Ctx.CostKind) + - Ctx.TTI.getGatherScatterOpCost(Opcode, Ty, Ptr, IsMasked, Alignment, - Ctx.CostKind, &Ingredient); + Ctx.TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, Ptr, IsMasked, Alignment, + &Ingredient), + Ctx.CostKind); } InstructionCost Cost = 0; if (IsMasked) { unsigned IID = isa(this) ? Intrinsic::masked_load : Intrinsic::masked_store; - Cost += - Ctx.TTI.getMaskedMemoryOpCost({IID, Ty, Alignment, AS}, Ctx.CostKind); + Cost += Ctx.TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(IID, Ty, Alignment, AS), Ctx.CostKind); } else { TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo( isa(this) ? getOperand(0) @@ -3701,18 +3627,19 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF, if (!Consecutive || IsMasked) return VPWidenMemoryRecipe::computeCost(VF, Ctx); - // We need to use the getMaskedMemoryOpCost() instead of getMemoryOpCost() + // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost() // here because the EVL recipes using EVL to replace the tail mask. 
But in the // legacy model, it will always calculate the cost of mask. - // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we + // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we // don't need to compare to the legacy cost model. Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF); unsigned AS = cast(Ctx.Types.inferScalarType(getAddr())) ->getAddressSpace(); // FIXME: getMaskedMemoryOpCost assumes masked_* intrinsics. // After migrating to getMemIntrinsicInstrCost, switch this to vp_load. - InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost( - {Intrinsic::masked_load, Ty, Alignment, AS}, Ctx.CostKind); + InstructionCost Cost = Ctx.TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_load, Ty, Alignment, AS), + Ctx.CostKind); if (!Reverse) return Cost; @@ -3812,18 +3739,19 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF, if (!Consecutive || IsMasked) return VPWidenMemoryRecipe::computeCost(VF, Ctx); - // We need to use the getMaskedMemoryOpCost() instead of getMemoryOpCost() + // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost() // here because the EVL recipes using EVL to replace the tail mask. But in the // legacy model, it will always calculate the cost of mask. - // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we + // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we // don't need to compare to the legacy cost model. Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF); unsigned AS = cast(Ctx.Types.inferScalarType(getAddr())) ->getAddressSpace(); // FIXME: getMaskedMemoryOpCost assumes masked_* intrinsics. // After migrating to getMemIntrinsicInstrCost, switch this to vp_store. 
- InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost( - {Intrinsic::masked_store, Ty, Alignment, AS}, Ctx.CostKind); + InstructionCost Cost = Ctx.TTI.getMemIntrinsicInstrCost( + MemIntrinsicCostAttributes(Intrinsic::masked_store, Ty, Alignment, AS), + Ctx.CostKind); if (!Reverse) return Cost; @@ -4444,7 +4372,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) { // this value when we vectorize all of the instructions that use the PHI. BasicBlock *VectorPH = State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0)); - bool ScalarPHI = State.VF.isScalar() || IsInLoop; + bool ScalarPHI = State.VF.isScalar() || isInLoop(); Value *StartV = State.get(StartVPV, ScalarPHI); Type *VecTy = StartV->getType(); @@ -4453,7 +4381,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) { "recipe must be in the vector loop header"); auto *Phi = PHINode::Create(VecTy, 2, "vec.phi"); Phi->insertBefore(HeaderBB->getFirstInsertionPt()); - State.set(this, Phi, IsInLoop); + State.set(this, Phi, isInLoop()); Phi->addIncoming(StartV, VectorPH); } @@ -4466,8 +4394,8 @@ void VPReductionPHIRecipe::printRecipe(raw_ostream &O, const Twine &Indent, printAsOperand(O, SlotTracker); O << " = phi "; printOperands(O, SlotTracker); - if (VFScaleFactor != 1) - O << " (VF scaled by 1/" << VFScaleFactor << ")"; + if (getVFScaleFactor() > 1) + O << " (VF scaled by 1/" << getVFScaleFactor() << ")"; } #endif diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 9174058baad65..b12f8ccc73c7e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -139,6 +139,41 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( return true; } +// Check if a load can be hoisted by verifying it doesn't alias with any stores +// in blocks between FirstBB and LastBB using scoped noalias metadata. 
+static bool canHoistLoadWithNoAliasCheck(VPReplicateRecipe *Load, + VPBasicBlock *FirstBB, + VPBasicBlock *LastBB) { + // Get the load's memory location and check if it aliases with any stores + // using scoped noalias metadata. + auto LoadLoc = vputils::getMemoryLocation(*Load); + if (!LoadLoc || !LoadLoc->AATags.Scope) + return false; + + const AAMDNodes &LoadAA = LoadLoc->AATags; + for (VPBlockBase *Block = FirstBB; Block; + Block = Block->getSingleSuccessor()) { + // This function assumes a simple linear chain of blocks. If there are + // multiple successors, we would need more complex analysis. + assert(Block->getNumSuccessors() <= 1 && + "Expected at most one successor in block chain"); + auto *VPBB = cast(Block); + for (VPRecipeBase &R : *VPBB) { + if (R.mayWriteToMemory()) { + auto Loc = vputils::getMemoryLocation(R); + // Bail out if we can't get the location or if the scoped noalias + // metadata indicates potential aliasing. + if (!Loc || ScopedNoAliasAAResult::mayAliasInScopes( + LoadAA.Scope, Loc->AATags.NoAlias)) + return false; + } + } + if (Block == LastBB) + break; + } + return true; +} + /// Return true if we do not know how to (mechanically) hoist or sink \p R out /// of a loop region. static bool cannotHoistOrSinkRecipe(const VPRecipeBase &R) { @@ -668,6 +703,23 @@ static SmallVector collectUsersRecursively(VPValue *V) { return Users.takeVector(); } +/// Scalarize a VPWidenPointerInductionRecipe by replacing it with a PtrAdd +/// (IndStart, ScalarIVSteps (0, Step)). This is used when the recipe only +/// generates scalar values. 
+static VPValue * +scalarizeVPWidenPointerInduction(VPWidenPointerInductionRecipe *PtrIV, + VPlan &Plan, VPBuilder &Builder) { + const InductionDescriptor &ID = PtrIV->getInductionDescriptor(); + VPValue *StartV = Plan.getConstantInt(ID.getStep()->getType(), 0); + VPValue *StepV = PtrIV->getOperand(1); + VPScalarIVStepsRecipe *Steps = createScalarIVSteps( + Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr, + nullptr, StartV, StepV, PtrIV->getDebugLoc(), Builder); + + return Builder.createPtrAdd(PtrIV->getStartValue(), Steps, + PtrIV->getDebugLoc(), "next.gep"); +} + /// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd /// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as /// VPWidenPointerInductionRecipe will generate vectors only. If some users @@ -720,16 +772,7 @@ static void legalizeAndOptimizeInductions(VPlan &Plan) { if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF())) continue; - const InductionDescriptor &ID = PtrIV->getInductionDescriptor(); - VPValue *StartV = Plan.getConstantInt(ID.getStep()->getType(), 0); - VPValue *StepV = PtrIV->getOperand(1); - VPScalarIVStepsRecipe *Steps = createScalarIVSteps( - Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr, - nullptr, StartV, StepV, PtrIV->getDebugLoc(), Builder); - - VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps, - PtrIV->getDebugLoc(), "next.gep"); - + VPValue *PtrAdd = scalarizeVPWidenPointerInduction(PtrIV, Plan, Builder); PtrIV->replaceAllUsesWith(PtrAdd); continue; } @@ -826,8 +869,8 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan, VPValue *Op, ScalarEvolution &SE) { VPValue *Incoming, *Mask; - if (!match(Op, m_VPInstruction( - m_FirstActiveLane(m_VPValue(Mask)), m_VPValue(Incoming)))) + if (!match(Op, m_ExtractLane(m_FirstActiveLane(m_VPValue(Mask)), + m_VPValue(Incoming)))) return nullptr; auto *WideIV = getOptimizableIVOf(Incoming, SE); @@ -1327,8 +1370,7 @@ static void 
simplifyRecipe(VPSingleDefRecipe *Def, VPTypeAnalysis &TypeInfo) { } // Look through ExtractPenultimateElement (BuildVector ....). - if (match(Def, m_VPInstruction( - m_BuildVector()))) { + if (match(Def, m_ExtractPenultimateElement(m_BuildVector()))) { auto *BuildVector = cast(Def->getOperand(0)); Def->replaceAllUsesWith( BuildVector->getOperand(BuildVector->getNumOperands() - 2)); @@ -1480,32 +1522,36 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) { continue; } - // Skip recipes that aren't single scalars or don't have only their - // scalar results used. In the latter case, we would introduce extra - // broadcasts. - if (!vputils::isSingleScalar(RepOrWidenR) || - !all_of(RepOrWidenR->users(), [RepOrWidenR](const VPUser *U) { - if (auto *Store = dyn_cast(U)) { - // VPWidenStore doesn't have users, and stores are always - // profitable to widen: hence, permitting address and mask - // operands, and single-scalar stored values is an important leaf - // condition. The assert must hold as we checked the RepOrWidenR - // operand against vputils::isSingleScalar. - assert(RepOrWidenR != Store->getStoredValue() || - vputils::isSingleScalar(Store->getStoredValue())); - (void)Store; - return true; - } - - if (auto *VPI = dyn_cast(U)) { - unsigned Opcode = VPI->getOpcode(); - if (Opcode == VPInstruction::ExtractLastElement || - Opcode == VPInstruction::ExtractLastLanePerPart || - Opcode == VPInstruction::ExtractPenultimateElement) - return true; - } - - return U->usesScalars(RepOrWidenR); + // Skip recipes that aren't single scalars. + if (!vputils::isSingleScalar(RepOrWidenR)) + continue; + + // Skip recipes for which conversion to single-scalar does introduce + // additional broadcasts. No extra broadcasts are needed, if either only + // the scalars of the recipe are used, or at least one of the operands + // would require a broadcast. In the latter case, the single-scalar may + // need to be broadcasted, but another broadcast is removed. 
+ if (!all_of(RepOrWidenR->users(), + [RepOrWidenR](const VPUser *U) { + if (auto *VPI = dyn_cast(U)) { + unsigned Opcode = VPI->getOpcode(); + if (Opcode == VPInstruction::ExtractLastElement || + Opcode == VPInstruction::ExtractLastLanePerPart || + Opcode == VPInstruction::ExtractPenultimateElement) + return true; + } + + return U->usesScalars(RepOrWidenR); + }) && + none_of(RepOrWidenR->operands(), [RepOrWidenR](VPValue *Op) { + if (Op->getSingleUser() != RepOrWidenR) + return false; + // Non-constant live-ins require broadcasts, while constants do not + // need explicit broadcasts. + bool LiveInNeedsBroadcast = + Op->isLiveIn() && !isa(Op->getLiveInIRValue()); + auto *OpR = dyn_cast(Op); + return LiveInNeedsBroadcast || (OpR && OpR->isSingleScalar()); })) continue; @@ -2140,6 +2186,32 @@ bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan, // Set the first operand of RecurSplice to FOR again, after replacing // all users. RecurSplice->setOperand(0, FOR); + + // Check for users extracting at the penultimate active lane of the FOR. + // If only a single lane is active in the current iteration, we need to + // select the last element from the previous iteration (from the FOR phi + // directly). 
+ for (VPUser *U : RecurSplice->users()) { + if (!match(U, m_ExtractLane(m_LastActiveLane(m_VPValue()), + m_Specific(RecurSplice)))) + continue; + + VPBuilder B(cast(U)); + VPValue *LastActiveLane = cast(U)->getOperand(0); + Type *I64Ty = Type::getInt64Ty(Plan.getContext()); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 0)); + VPValue *One = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 1)); + VPValue *PenultimateIndex = + B.createNaryOp(Instruction::Sub, {LastActiveLane, One}); + VPValue *PenultimateLastIter = + B.createNaryOp(VPInstruction::ExtractLane, + {PenultimateIndex, FOR->getBackedgeValue()}); + VPValue *LastPrevIter = + B.createNaryOp(VPInstruction::ExtractLastElement, FOR); + VPValue *Cmp = B.createICmp(CmpInst::ICMP_EQ, LastActiveLane, Zero); + VPValue *Sel = B.createSelect(Cmp, LastPrevIter, PenultimateLastIter); + cast(U)->replaceAllUsesWith(Sel); + } } return true; } @@ -2676,6 +2748,7 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask, VPRecipeBase &CurRecipe, VPTypeAnalysis &TypeInfo, VPValue &EVL) { VPlan *Plan = CurRecipe.getParent()->getPlan(); + DebugLoc DL = CurRecipe.getDebugLoc(); VPValue *Addr, *Mask, *EndPtr; /// Adjust any end pointers so that they point to the end of EVL lanes not VF. 
@@ -2727,13 +2800,21 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask, m_Select(m_Specific(HeaderMask), m_VPValue(LHS), m_VPValue(RHS)))) return new VPWidenIntrinsicRecipe( Intrinsic::vp_merge, {Plan->getTrue(), LHS, RHS, &EVL}, - TypeInfo.inferScalarType(LHS), {}, {}, CurRecipe.getDebugLoc()); + TypeInfo.inferScalarType(LHS), {}, {}, DL); if (match(&CurRecipe, m_Select(m_RemoveMask(HeaderMask, Mask), m_VPValue(LHS), m_VPValue(RHS)))) return new VPWidenIntrinsicRecipe( Intrinsic::vp_merge, {Mask, LHS, RHS, &EVL}, - TypeInfo.inferScalarType(LHS), {}, {}, CurRecipe.getDebugLoc()); + TypeInfo.inferScalarType(LHS), {}, {}, DL); + + if (match(&CurRecipe, m_LastActiveLane(m_Specific(HeaderMask)))) { + Type *Ty = TypeInfo.inferScalarType(CurRecipe.getVPSingleValue()); + VPValue *ZExt = + VPBuilder(&CurRecipe).createScalarCast(Instruction::ZExt, &EVL, Ty, DL); + return new VPInstruction(Instruction::Sub, + {ZExt, Plan->getConstantInt(Ty, 1)}, {}, {}, DL); + } return nullptr; } @@ -3506,6 +3587,16 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { } if (auto *WidenIVR = dyn_cast(&R)) { + // If the recipe only generates scalars, scalarize it instead of + // expanding it. + if (WidenIVR->onlyScalarsGenerated(Plan.hasScalableVF())) { + VPBuilder Builder(WidenIVR); + VPValue *PtrAdd = + scalarizeVPWidenPointerInduction(WidenIVR, Plan, Builder); + WidenIVR->replaceAllUsesWith(PtrAdd); + ToRemove.push_back(WidenIVR); + continue; + } expandVPWidenPointerInduction(WidenIVR, TypeInfo); ToRemove.push_back(WidenIVR); continue; @@ -3528,6 +3619,34 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { ToRemove.push_back(Expr); } + // Expand LastActiveLane into Not + FirstActiveLane + Sub. + auto *LastActiveL = dyn_cast(&R); + if (LastActiveL && + LastActiveL->getOpcode() == VPInstruction::LastActiveLane) { + // Create Not(Mask) for all operands. 
+ SmallVector NotMasks; + for (VPValue *Op : LastActiveL->operands()) { + VPValue *NotMask = Builder.createNot(Op, LastActiveL->getDebugLoc()); + NotMasks.push_back(NotMask); + } + + // Create FirstActiveLane on the inverted masks. + VPValue *FirstInactiveLane = Builder.createNaryOp( + VPInstruction::FirstActiveLane, NotMasks, + LastActiveL->getDebugLoc(), "first.inactive.lane"); + + // Subtract 1 to get the last active lane. + VPValue *One = Plan.getOrAddLiveIn( + ConstantInt::get(Type::getInt64Ty(Plan.getContext()), 1)); + VPValue *LastLane = Builder.createNaryOp( + Instruction::Sub, {FirstInactiveLane, One}, + LastActiveL->getDebugLoc(), "last.active.lane"); + + LastActiveL->replaceAllUsesWith(LastLane); + ToRemove.push_back(LastActiveL); + continue; + } + VPValue *VectorStep; VPValue *ScalarStep; if (!match(&R, m_VPInstruction( @@ -3675,7 +3794,7 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx, cast(VecOp)->computeCost(VF, Ctx); InstructionCost RedCost = Red->computeCost(VF, Ctx); - if (isa(Red)) { + if (Red->isPartialReduction()) { TargetTransformInfo::PartialReductionExtendKind ExtKind = TargetTransformInfo::getPartialReductionExtendKind(ExtOpc); // FIXME: Move partial reduction creation, costing and clamping @@ -3716,8 +3835,6 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx, static VPExpressionRecipe * tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, VPCostContext &Ctx, VFRange &Range) { - bool IsPartialReduction = isa(Red); - unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()); if (Opcode != Instruction::Add && Opcode != Instruction::Sub) return nullptr; @@ -3735,7 +3852,7 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, Ext0 ? Ctx.Types.inferScalarType(Ext0->getOperand(0)) : RedTy; InstructionCost MulAccCost; - if (IsPartialReduction) { + if (Red->isPartialReduction()) { Type *SrcTy2 = Ext1 ? 
Ctx.Types.inferScalarType(Ext1->getOperand(0)) : nullptr; // FIXME: Move partial reduction creation, costing and clamping @@ -4010,6 +4127,122 @@ void VPlanTransforms::hoistInvariantLoads(VPlan &Plan) { } } +// Returns the intersection of metadata from a group of loads. +static VPIRMetadata getCommonLoadMetadata(ArrayRef Loads) { + VPIRMetadata CommonMetadata = *Loads.front(); + for (VPReplicateRecipe *Load : drop_begin(Loads)) + CommonMetadata.intersect(*Load); + return CommonMetadata; +} + +void VPlanTransforms::hoistPredicatedLoads(VPlan &Plan, ScalarEvolution &SE, + const Loop *L) { + VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion(); + VPTypeAnalysis TypeInfo(Plan); + VPDominatorTree VPDT(Plan); + + // Group predicated loads by their address SCEV. + DenseMap> LoadsByAddress; + for (VPBlockBase *Block : vp_depth_first_shallow(LoopRegion->getEntry())) { + auto *VPBB = cast(Block); + for (VPRecipeBase &R : *VPBB) { + auto *RepR = dyn_cast(&R); + if (!RepR || RepR->getOpcode() != Instruction::Load || + !RepR->isPredicated()) + continue; + + VPValue *Addr = RepR->getOperand(0); + const SCEV *AddrSCEV = vputils::getSCEVExprForVPValue(Addr, SE, L); + if (!isa(AddrSCEV)) + LoadsByAddress[AddrSCEV].push_back(RepR); + } + } + + // For each address, collect loads with complementary masks, sort by + // dominance, and use the earliest load. + for (auto &[Addr, Loads] : LoadsByAddress) { + if (Loads.size() < 2) + continue; + + // Collect groups of loads with complementary masks. + SmallVector> LoadGroups; + for (VPReplicateRecipe *&LoadI : Loads) { + if (!LoadI) + continue; + + VPValue *MaskI = LoadI->getMask(); + Type *TypeI = TypeInfo.inferScalarType(LoadI); + SmallVector Group; + Group.push_back(LoadI); + LoadI = nullptr; + + // Find all loads with the same type. 
+ for (VPReplicateRecipe *&LoadJ : Loads) { + if (!LoadJ) + continue; + + Type *TypeJ = TypeInfo.inferScalarType(LoadJ); + if (TypeI == TypeJ) { + Group.push_back(LoadJ); + LoadJ = nullptr; + } + } + + // Check if any load in the group has a complementary mask with another, + // that is M1 == NOT(M2) or M2 == NOT(M1). + bool HasComplementaryMask = + any_of(drop_begin(Group), [MaskI](VPReplicateRecipe *Load) { + VPValue *MaskJ = Load->getMask(); + return match(MaskI, m_Not(m_Specific(MaskJ))) || + match(MaskJ, m_Not(m_Specific(MaskI))); + }); + + if (HasComplementaryMask) + LoadGroups.push_back(std::move(Group)); + } + + // For each group, check memory dependencies and hoist the earliest load. + for (auto &Group : LoadGroups) { + // Sort loads by dominance order, with earliest (most dominating) first. + sort(Group, [&VPDT](VPReplicateRecipe *A, VPReplicateRecipe *B) { + return VPDT.properlyDominates(A, B); + }); + + VPReplicateRecipe *EarliestLoad = Group.front(); + VPBasicBlock *FirstBB = EarliestLoad->getParent(); + VPBasicBlock *LastBB = Group.back()->getParent(); + + // Check that the load doesn't alias with stores between first and last. + if (!canHoistLoadWithNoAliasCheck(EarliestLoad, FirstBB, LastBB)) + continue; + + // Find the load with minimum alignment to use. + auto *LoadWithMinAlign = + *min_element(Group, [](VPReplicateRecipe *A, VPReplicateRecipe *B) { + return cast(A->getUnderlyingInstr())->getAlign() < + cast(B->getUnderlyingInstr())->getAlign(); + }); + + // Collect common metadata from all loads in the group. + VPIRMetadata CommonMetadata = getCommonLoadMetadata(Group); + + // Create an unpredicated load with minimum alignment using the earliest + // dominating address and common metadata. 
+ auto *UnpredicatedLoad = new VPReplicateRecipe( + LoadWithMinAlign->getUnderlyingInstr(), EarliestLoad->getOperand(0), + /*IsSingleScalar=*/false, /*Mask=*/nullptr, /*Flags=*/{}, + CommonMetadata); + UnpredicatedLoad->insertBefore(EarliestLoad); + + // Replace all loads in the group with the unpredicated load. + for (VPReplicateRecipe *Load : Group) { + Load->replaceAllUsesWith(UnpredicatedLoad); + Load->eraseFromParent(); + } + } + } +} + void VPlanTransforms::materializeConstantVectorTripCount( VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 5fd3f756c55e3..ae3797dee1f07 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -145,6 +145,11 @@ struct VPlanTransforms { GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI); + /// Try to legalize reductions with multiple in-loop uses. Currently only + /// min/max reductions used by FindLastIV reductions are supported. Otherwise + /// return false. + static bool handleMultiUseReductions(VPlan &Plan); + /// Try to have all users of fixed-order recurrences appear after the recipe /// defining their previous value, by either sinking users or hoisting recipes /// defining their previous value (and its operands). Then introduce @@ -314,6 +319,12 @@ struct VPlanTransforms { /// plan using noalias metadata. static void hoistInvariantLoads(VPlan &Plan); + /// Hoist predicated loads from the same address to the loop entry block, if + /// they are guaranteed to execute on both paths (i.e., in replicate regions + /// with complementary masks P and NOT P). + static void hoistPredicatedLoads(VPlan &Plan, ScalarEvolution &SE, + const Loop *L); + // Materialize vector trip counts for constants early if it can simply be // computed as (Original TC / VF * UF) * VF * UF. 
static void diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp index d76d2ed5f1c76..f215476b1e163 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp @@ -275,11 +275,10 @@ void UnrollState::unrollRecipeByUF(VPRecipeBase &R) { remapOperands(&R, UF - 1); return; } - if (auto *II = dyn_cast(RepR->getUnderlyingValue())) { - if (II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl) { - addUniformForAllParts(RepR); - return; - } + if (match(RepR, + m_Intrinsic())) { + addUniformForAllParts(RepR); + return; } } @@ -352,6 +351,7 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { VPValue *Op1; if (match(&R, m_VPInstruction(m_VPValue(Op1))) || match(&R, m_FirstActiveLane(m_VPValue(Op1))) || + match(&R, m_LastActiveLane(m_VPValue(Op1))) || match(&R, m_VPInstruction( m_VPValue(), m_VPValue(), m_VPValue(Op1))) || match(&R, m_VPInstruction( @@ -364,17 +364,21 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { continue; } VPValue *Op0; - if (match(&R, m_VPInstruction( - m_VPValue(Op0), m_VPValue(Op1)))) { + if (match(&R, m_ExtractLane(m_VPValue(Op0), m_VPValue(Op1)))) { addUniformForAllParts(cast(&R)); for (unsigned Part = 1; Part != UF; ++Part) R.addOperand(getValueForPart(Op1, Part)); continue; } if (match(&R, m_ExtractLastElement(m_VPValue(Op0))) || - match(&R, m_VPInstruction( - m_VPValue(Op0)))) { + match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) { addUniformForAllParts(cast(&R)); + if (isa(Op0)) { + assert(match(&R, m_ExtractLastElement(m_VPValue())) && + "can only extract last element of FOR"); + continue; + } + if (Plan.hasScalarVFOnly()) { auto *I = cast(&R); // Extracting from end with VF = 1 implies retrieving the last or @@ -466,7 +470,7 @@ void VPlanTransforms::unrollByUF(VPlan &Plan, unsigned UF) { /// definitions for operands of \DefR. 
static VPValue * cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy, - VPRecipeWithIRFlags *DefR, VPLane Lane, + VPSingleDefRecipe *DefR, VPLane Lane, const DenseMap> &Def2LaneDefs) { VPValue *Op; if (match(DefR, m_VPInstruction(m_VPValue(Op)))) { @@ -513,7 +517,7 @@ cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy, NewOps.push_back(Ext); } - VPRecipeWithIRFlags *New; + VPSingleDefRecipe *New; if (auto *RepR = dyn_cast(DefR)) { // TODO: have cloning of replicate recipes also provide the desired result // coupled with setting its operands to NewOps (deriving IsSingleScalar and @@ -529,7 +533,6 @@ cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy, New->setOperand(Idx, Op); } } - New->transferFlags(*DefR); New->insertBefore(DefR); return New; } @@ -563,7 +566,7 @@ void VPlanTransforms::replicateByVF(VPlan &Plan, ElementCount VF) { cast(&R)->getOpcode() != VPInstruction::Unpack)) continue; - auto *DefR = cast(&R); + auto *DefR = cast(&R); VPBuilder Builder(DefR); if (DefR->getNumUsers() == 0) { // Create single-scalar version of DefR for all lanes. 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index 839a304904e8b..c7a0fd7407a4e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -195,10 +195,9 @@ bool vputils::isSingleScalar(const VPValue *VPV) { return VPI->isSingleScalar() || VPI->isVectorToScalar() || (preservesUniformity(VPI->getOpcode()) && all_of(VPI->operands(), isSingleScalar)); - if (isa(VPV)) - return false; - if (isa( - VPV)) + if (auto *RR = dyn_cast(VPV)) + return !RR->isPartialReduction(); + if (isa(VPV)) return true; if (auto *Expr = dyn_cast(VPV)) return Expr->isSingleScalar(); @@ -270,7 +269,7 @@ unsigned vputils::getVFScaleFactor(VPRecipeBase *R) { return 1; if (auto *RR = dyn_cast(R)) return RR->getVFScaleFactor(); - if (auto *RR = dyn_cast(R)) + if (auto *RR = dyn_cast(R)) return RR->getVFScaleFactor(); if (auto *ER = dyn_cast(R)) return ER->getVFScaleFactor(); diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h index 63eacd3d75721..b9f5847ec731c 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanValue.h +++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -349,7 +349,6 @@ class VPDef { VPInterleaveSC, VPReductionEVLSC, VPReductionSC, - VPPartialReductionSC, VPReplicateSC, VPScalarIVStepsSC, VPVectorPointerSC, diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 34754a1ea3992..2d63d2a787f88 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -18,6 +18,7 @@ #include "VPlanDominatorTree.h" #include "VPlanHelpers.h" #include "VPlanPatternMatch.h" +#include "VPlanUtils.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/TypeSwitch.h" @@ -44,6 +45,9 @@ class VPlanVerifier { /// incoming value into EVL's recipe. 
bool verifyEVLRecipe(const VPInstruction &EVL) const; + /// Verify that \p LastActiveLane's operand is guaranteed to be a prefix-mask. + bool verifyLastActiveLaneRecipe(const VPInstruction &LastActiveLane) const; + bool verifyVPBasicBlock(const VPBasicBlock *VPBB); bool verifyBlock(const VPBlockBase *VPB); @@ -221,6 +225,44 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { }); } +bool VPlanVerifier::verifyLastActiveLaneRecipe( + const VPInstruction &LastActiveLane) const { + assert(LastActiveLane.getOpcode() == VPInstruction::LastActiveLane && + "must be called with VPInstruction::LastActiveLane"); + + if (LastActiveLane.getNumOperands() < 1) { + errs() << "LastActiveLane must have at least one operand\n"; + return false; + } + + const VPlan &Plan = *LastActiveLane.getParent()->getPlan(); + // All operands must be prefix-mask. Currently we check for header masks or + // EVL-derived masks, as those are currently the only operands in practice, + // but this may need updating in the future. + for (VPValue *Op : LastActiveLane.operands()) { + if (vputils::isHeaderMask(Op, Plan)) + continue; + + // Masks derived from EVL are also fine. 
+ auto BroadcastOrEVL = + m_CombineOr(m_Broadcast(m_EVL(m_VPValue())), m_EVL(m_VPValue())); + if (match(Op, m_CombineOr(m_ICmp(m_StepVector(), BroadcastOrEVL), + m_ICmp(BroadcastOrEVL, m_StepVector())))) + continue; + + errs() << "LastActiveLane operand "; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + VPSlotTracker Tracker(&Plan); + Op->printAsOperand(errs(), Tracker); +#endif + errs() << " must be prefix mask (a header mask or an " + "EVL-derived mask currently)\n"; + return false; + } + + return true; +} + bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { if (!verifyPhiRecipes(VPBB)) return false; @@ -313,6 +355,10 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { return false; } break; + case VPInstruction::LastActiveLane: + if (!verifyLastActiveLaneRecipe(*VPI)) + return false; + break; default: break; } diff --git a/llvm/test/Analysis/CostModel/AArch64/fshl.ll b/llvm/test/Analysis/CostModel/AArch64/fshl.ll index cd6068d382169..61296a8e3c5d3 100644 --- a/llvm/test/Analysis/CostModel/AArch64/fshl.ll +++ b/llvm/test/Analysis/CostModel/AArch64/fshl.ll @@ -349,7 +349,7 @@ entry: define i32 @rotl_i32_3rd_arg_var(i32 %a, i32 %c) { ; CHECK-LABEL: 'rotl_i32_3rd_arg_var' -; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %c) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 %r ; entry: @@ -369,7 +369,7 @@ entry: define i64 @rotl_i64_3rd_arg_var(i64 %a, i64 %c) { ; CHECK-LABEL: 'rotl_i64_3rd_arg_var' -; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %c) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i64 %r ; entry: diff --git 
a/llvm/test/Analysis/CostModel/AArch64/fshr.ll b/llvm/test/Analysis/CostModel/AArch64/fshr.ll index 795371e9f3f68..1aa6de967739b 100644 --- a/llvm/test/Analysis/CostModel/AArch64/fshr.ll +++ b/llvm/test/Analysis/CostModel/AArch64/fshr.ll @@ -297,8 +297,8 @@ entry: ; Rotate tests -define i8 @rotl_i8_3rd_arg_const(i8 %a) { -; CHECK-LABEL: 'rotl_i8_3rd_arg_const' +define i8 @rotr_i8_3rd_arg_const(i8 %a) { +; CHECK-LABEL: 'rotr_i8_3rd_arg_const' ; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call i8 @llvm.fshr.i8(i8 %a, i8 %a, i8 9) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i8 %r ; @@ -307,8 +307,8 @@ entry: ret i8 %r } -define i8 @rotl_i8_3rd_arg_var(i8 %a, i8 %c) { -; CHECK-LABEL: 'rotl_i8_3rd_arg_var' +define i8 @rotr_i8_3rd_arg_var(i8 %a, i8 %c) { +; CHECK-LABEL: 'rotr_i8_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i8 @llvm.fshr.i8(i8 %a, i8 %a, i8 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i8 %r ; @@ -317,8 +317,8 @@ entry: ret i8 %r } -define i16 @rotl_i16_3rd_arg_const(i16 %a) { -; CHECK-LABEL: 'rotl_i16_3rd_arg_const' +define i16 @rotr_i16_3rd_arg_const(i16 %a) { +; CHECK-LABEL: 'rotr_i16_3rd_arg_const' ; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 9) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i16 %r ; @@ -327,8 +327,8 @@ entry: ret i16 %r } -define i16 @rotl_i16_3rd_arg_var(i16 %a, i16 %c) { -; CHECK-LABEL: 'rotl_i16_3rd_arg_var' +define i16 @rotr_i16_3rd_arg_var(i16 %a, i16 %c) { +; CHECK-LABEL: 'rotr_i16_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i16 %r ; @@ -337,8 +337,8 @@ entry: ret i16 %r } -define i32 @rotl_i32_3rd_arg_const(i32 %a) { -; CHECK-LABEL: 
'rotl_i32_3rd_arg_const' +define i32 @rotr_i32_3rd_arg_const(i32 %a) { +; CHECK-LABEL: 'rotr_i32_3rd_arg_const' ; CHECK-NEXT: Cost Model: Found costs of 1 for: %r = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 9) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 %r ; @@ -347,9 +347,9 @@ entry: ret i32 %r } -define i32 @rotl_i32_3rd_arg_var(i32 %a, i32 %c) { -; CHECK-LABEL: 'rotl_i32_3rd_arg_var' -; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %c) +define i32 @rotr_i32_3rd_arg_var(i32 %a, i32 %c) { +; CHECK-LABEL: 'rotr_i32_3rd_arg_var' +; CHECK-NEXT: Cost Model: Found costs of 1 for: %r = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 %r ; entry: @@ -357,8 +357,8 @@ entry: ret i32 %r } -define i64 @rotl_i64_3rd_arg_const(i64 %a) { -; CHECK-LABEL: 'rotl_i64_3rd_arg_const' +define i64 @rotr_i64_3rd_arg_const(i64 %a) { +; CHECK-LABEL: 'rotr_i64_3rd_arg_const' ; CHECK-NEXT: Cost Model: Found costs of 1 for: %r = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 9) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i64 %r ; @@ -367,9 +367,9 @@ entry: ret i64 %r } -define i64 @rotl_i64_3rd_arg_var(i64 %a, i64 %c) { -; CHECK-LABEL: 'rotl_i64_3rd_arg_var' -; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %c) +define i64 @rotr_i64_3rd_arg_var(i64 %a, i64 %c) { +; CHECK-LABEL: 'rotr_i64_3rd_arg_var' +; CHECK-NEXT: Cost Model: Found costs of 1 for: %r = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i64 %r ; entry: @@ -377,8 +377,8 @@ entry: ret i64 %r } -define i128 @rotl_i128_3rd_arg_const(i128 %a) { -; CHECK-LABEL: 'rotl_i128_3rd_arg_const' +define i128 @rotr_i128_3rd_arg_const(i128 %a) { +; CHECK-LABEL: 
'rotr_i128_3rd_arg_const' ; CHECK-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:4 SizeLat:4 for: %r = tail call i128 @llvm.fshr.i128(i128 %a, i128 %a, i128 9) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i128 %r ; @@ -387,8 +387,8 @@ entry: ret i128 %r } -define i128 @rotl_i128_3rd_arg_var(i128 %a, i128 %c) { -; CHECK-LABEL: 'rotl_i128_3rd_arg_var' +define i128 @rotr_i128_3rd_arg_var(i128 %a, i128 %c) { +; CHECK-LABEL: 'rotr_i128_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:5 Lat:5 SizeLat:5 for: %r = tail call i128 @llvm.fshr.i128(i128 %a, i128 %a, i128 %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i128 %r ; @@ -397,8 +397,8 @@ entry: ret i128 %r } -define <16 x i8> @rotl_v16i8_3rd_arg_vec_const_all_lanes_same(<16 x i8> %a) { -; CHECK-LABEL: 'rotl_v16i8_3rd_arg_vec_const_all_lanes_same' +define <16 x i8> @rotr_v16i8_3rd_arg_vec_const_all_lanes_same(<16 x i8> %a) { +; CHECK-LABEL: 'rotr_v16i8_3rd_arg_vec_const_all_lanes_same' ; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> splat (i8 3)) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <16 x i8> %r ; @@ -407,8 +407,8 @@ entry: ret <16 x i8> %r } -define <16 x i8> @rotl_v16i8_3rd_arg_vec_const_lanes_different(<16 x i8> %a) { -; CHECK-LABEL: 'rotl_v16i8_3rd_arg_vec_const_lanes_different' +define <16 x i8> @rotr_v16i8_3rd_arg_vec_const_lanes_different(<16 x i8> %a) { +; CHECK-LABEL: 'rotr_v16i8_3rd_arg_vec_const_lanes_different' ; CHECK-NEXT: Cost Model: Found costs of 4 for: %r = tail call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> ) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <16 x i8> %r ; @@ -417,8 +417,8 @@ entry: ret <16 x i8> %r } -define <16 x i8> @rotl_v16i8_3rd_arg_var(<16 x i8> %a, <16 x i8> %c) { -; CHECK-LABEL: 
'rotl_v16i8_3rd_arg_var' +define <16 x i8> @rotr_v16i8_3rd_arg_var(<16 x i8> %a, <16 x i8> %c) { +; CHECK-LABEL: 'rotr_v16i8_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <16 x i8> %r ; @@ -427,8 +427,8 @@ entry: ret <16 x i8> %r } -define <8 x i16> @rotl_v8i16_3rd_arg_vec_const_all_lanes_same(<8 x i16> %a) { -; CHECK-LABEL: 'rotl_v8i16_3rd_arg_vec_const_all_lanes_same' +define <8 x i16> @rotr_v8i16_3rd_arg_vec_const_all_lanes_same(<8 x i16> %a) { +; CHECK-LABEL: 'rotr_v8i16_3rd_arg_vec_const_all_lanes_same' ; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> splat (i16 3)) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <8 x i16> %r ; @@ -437,8 +437,8 @@ entry: ret <8 x i16> %r } -define <8 x i16> @rotl_v8i16_3rd_arg_vec_const_lanes_different(<8 x i16> %a) { -; CHECK-LABEL: 'rotl_v8i16_3rd_arg_vec_const_lanes_different' +define <8 x i16> @rotr_v8i16_3rd_arg_vec_const_lanes_different(<8 x i16> %a) { +; CHECK-LABEL: 'rotr_v8i16_3rd_arg_vec_const_lanes_different' ; CHECK-NEXT: Cost Model: Found costs of 4 for: %r = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> ) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <8 x i16> %r ; @@ -447,8 +447,8 @@ entry: ret <8 x i16> %r } -define <8 x i16> @rotl_v8i16_3rd_arg_var(<8 x i16> %a, <8 x i16> %c) { -; CHECK-LABEL: 'rotl_v8i16_3rd_arg_var' +define <8 x i16> @rotr_v8i16_3rd_arg_var(<8 x i16> %a, <8 x i16> %c) { +; CHECK-LABEL: 'rotr_v8i16_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <8 x 
i16> %r ; @@ -457,8 +457,8 @@ entry: ret <8 x i16> %r } -define <4 x i32> @rotl_v4i32_3rd_arg_vec_const_all_lanes_same(<4 x i32> %a) { -; CHECK-LABEL: 'rotl_v4i32_3rd_arg_vec_const_all_lanes_same' +define <4 x i32> @rotr_v4i32_3rd_arg_vec_const_all_lanes_same(<4 x i32> %a) { +; CHECK-LABEL: 'rotr_v4i32_3rd_arg_vec_const_all_lanes_same' ; CHECK-NEXT: Cost Model: Found costs of 2 for: %r = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> splat (i32 3)) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <4 x i32> %r ; @@ -467,8 +467,8 @@ entry: ret <4 x i32> %r } -define <4 x i32> @rotl_v4i32_3rd_arg_vec_const_lanes_different(<4 x i32> %a) { -; CHECK-LABEL: 'rotl_v4i32_3rd_arg_vec_const_lanes_different' +define <4 x i32> @rotr_v4i32_3rd_arg_vec_const_lanes_different(<4 x i32> %a) { +; CHECK-LABEL: 'rotr_v4i32_3rd_arg_vec_const_lanes_different' ; CHECK-NEXT: Cost Model: Found costs of 4 for: %r = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> ) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <4 x i32> %r ; @@ -477,8 +477,8 @@ entry: ret <4 x i32> %r } -define <4 x i32> @rotl_v4i32_3rd_arg_var(<4 x i32> %a, <4 x i32> %c) { -; CHECK-LABEL: 'rotl_v4i32_3rd_arg_var' +define <4 x i32> @rotr_v4i32_3rd_arg_var(<4 x i32> %a, <4 x i32> %c) { +; CHECK-LABEL: 'rotr_v4i32_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <4 x i32> %r ; @@ -487,8 +487,8 @@ entry: ret <4 x i32> %r } -define <2 x i64> @rotl_v2i64_3rd_arg_vec_const_all_lanes_same(<2 x i64> %a) { -; CHECK-LABEL: 'rotl_v2i64_3rd_arg_vec_const_all_lanes_same' +define <2 x i64> @rotr_v2i64_3rd_arg_vec_const_all_lanes_same(<2 x i64> %a) { +; CHECK-LABEL: 'rotr_v2i64_3rd_arg_vec_const_all_lanes_same' ; CHECK-NEXT: Cost 
Model: Found costs of 2 for: %r = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> splat (i64 1)) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i64> %r ; @@ -497,8 +497,8 @@ entry: ret <2 x i64> %r } -define <2 x i64> @rotl_v2i64_3rd_arg_vec_const_lanes_different(<2 x i64> %a) { -; CHECK-LABEL: 'rotl_v2i64_3rd_arg_vec_const_lanes_different' +define <2 x i64> @rotr_v2i64_3rd_arg_vec_const_lanes_different(<2 x i64> %a) { +; CHECK-LABEL: 'rotr_v2i64_3rd_arg_vec_const_lanes_different' ; CHECK-NEXT: Cost Model: Found costs of 4 for: %r = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> ) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i64> %r ; @@ -507,8 +507,8 @@ entry: ret <2 x i64> %r } -define <2 x i64> @rotl_v2i64_3rd_arg_var(<2 x i64> %a, <2 x i64> %c) { -; CHECK-LABEL: 'rotl_v2i64_3rd_arg_var' +define <2 x i64> @rotr_v2i64_3rd_arg_var(<2 x i64> %a, <2 x i64> %c) { +; CHECK-LABEL: 'rotr_v2i64_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of 5 for: %r = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i64> %r ; @@ -517,8 +517,8 @@ entry: ret <2 x i64> %r } -define <2 x i128> @rotl_v2i128_3rd_arg_vec_const_all_lanes_same(<2 x i128> %a) { -; CHECK-LABEL: 'rotl_v2i128_3rd_arg_vec_const_all_lanes_same' +define <2 x i128> @rotr_v2i128_3rd_arg_vec_const_all_lanes_same(<2 x i128> %a) { +; CHECK-LABEL: 'rotr_v2i128_3rd_arg_vec_const_all_lanes_same' ; CHECK-NEXT: Cost Model: Found costs of RThru:16 CodeSize:4 Lat:4 SizeLat:4 for: %r = tail call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %a, <2 x i128> %a, <2 x i128> splat (i128 1)) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i128> %r ; @@ -527,8 +527,8 @@ entry: ret <2 x i128> %r } -define <2 x i128> 
@rotl_v2i128_3rd_arg_vec_const_lanes_different(<2 x i128> %a) { -; CHECK-LABEL: 'rotl_v2i128_3rd_arg_vec_const_lanes_different' +define <2 x i128> @rotr_v2i128_3rd_arg_vec_const_lanes_different(<2 x i128> %a) { +; CHECK-LABEL: 'rotr_v2i128_3rd_arg_vec_const_lanes_different' ; CHECK-NEXT: Cost Model: Found costs of RThru:16 CodeSize:4 Lat:4 SizeLat:4 for: %r = tail call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %a, <2 x i128> %a, <2 x i128> ) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i128> %r ; @@ -537,8 +537,8 @@ entry: ret <2 x i128> %r } -define <2 x i128> @rotl_v2i128_3rd_arg_var(<2 x i128> %a, <2 x i128> %c) { -; CHECK-LABEL: 'rotl_v2i128_3rd_arg_var' +define <2 x i128> @rotr_v2i128_3rd_arg_var(<2 x i128> %a, <2 x i128> %c) { +; CHECK-LABEL: 'rotr_v2i128_3rd_arg_var' ; CHECK-NEXT: Cost Model: Found costs of RThru:20 CodeSize:5 Lat:5 SizeLat:5 for: %r = tail call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %a, <2 x i128> %a, <2 x i128> %c) ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret <2 x i128> %r ; diff --git a/llvm/test/Analysis/Delinearization/a.ll b/llvm/test/Analysis/Delinearization/a.ll index 1830a3da77857..5d2d4dc29206e 100644 --- a/llvm/test/Analysis/Delinearization/a.ll +++ b/llvm/test/Analysis/Delinearization/a.ll @@ -15,6 +15,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, ptr nocapture %A) #0 { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes. 
; CHECK-NEXT: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %cmp32 = icmp sgt i64 %n, 0 diff --git a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll index 891d604f5cf13..9e6a4221f8eda 100644 --- a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll +++ b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll @@ -11,12 +11,14 @@ define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) !kernel_arg_addr_space !2 ! ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[%call][{0,+,1}<%for.inc>] +; CHECK-NEXT: Delinearization validation: Succeeded ; CHECK-EMPTY: ; CHECK-NEXT: Inst: %tmp5 = load float, ptr %arrayidx4, align 4 ; CHECK-NEXT: AccessFunction: {(4 * %call1),+,(4 * %N)}<%for.inc> ; CHECK-NEXT: Base offset: %B ; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.inc>][%call1] +; CHECK-NEXT: Delinearization validation: Failed ; entry: br label %entry.split diff --git a/llvm/test/Analysis/Delinearization/divide_by_one.ll b/llvm/test/Analysis/Delinearization/divide_by_one.ll index e812e65ba7fd7..3d8e55984291e 100644 --- a/llvm/test/Analysis/Delinearization/divide_by_one.ll +++ b/llvm/test/Analysis/Delinearization/divide_by_one.ll @@ -18,12 +18,14 @@ define void @test(ptr nocapture %dst, i32 %stride, i32 %bs) { ; CHECK-NEXT: Base offset: %dst ; CHECK-NEXT: ArrayDecl[UnknownSize][%stride] with elements of 1 bytes. 
; CHECK-NEXT: ArrayRef[{(1 + %bs),+,-1}<%for.cond1.preheader>][{-1,+,1}<%for.body3>] +; CHECK-NEXT: Delinearization validation: Failed ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store i8 %0, ptr %arrayidx7, align 1 ; CHECK-NEXT: AccessFunction: {{\{\{}}(%stride * %bs),+,(-1 * %stride)}<%for.cond1.preheader>,+,1}<%for.body3> ; CHECK-NEXT: Base offset: %dst ; CHECK-NEXT: ArrayDecl[UnknownSize][%stride] with elements of 1 bytes. ; CHECK-NEXT: ArrayRef[{%bs,+,-1}<%for.cond1.preheader>][{0,+,1}<%for.body3>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %cmp20 = icmp sgt i32 %bs, -1 diff --git a/llvm/test/Analysis/Delinearization/fixed_size_array.ll b/llvm/test/Analysis/Delinearization/fixed_size_array.ll index cecd1eacb1437..250d46c81a25b 100644 --- a/llvm/test/Analysis/Delinearization/fixed_size_array.ll +++ b/llvm/test/Analysis/Delinearization/fixed_size_array.ll @@ -15,6 +15,7 @@ define void @a_i_j_k(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{0,+,1}<%for.j.header>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -63,6 +64,7 @@ define void @a_i_nj_k(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{7,+,-1}<%for.j.header>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -118,12 +120,14 @@ define void @a_ijk_b_i2jk(ptr %a, ptr %b) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes. 
; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{0,+,1}<%for.j.header>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store i32 1, ptr %b.idx, align 4 ; CHECK-NEXT: AccessFunction: {{\{\{\{}}0,+,1024}<%for.i.header>,+,256}<%for.j.header>,+,4}<%for.k> ; CHECK-NEXT: Base offset: %b ; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{0,+,1}<%for.j.header>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -180,6 +184,7 @@ define void @a_i_2j1_k(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{0,+,1}<%for.j.header>][{32,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -284,6 +289,7 @@ define void @a_i_j_3k(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{0,+,1}<%for.j.header>][{0,+,3}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -386,6 +392,7 @@ define void @a_i_i_jk(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][288] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{{\{\{}}0,+,1}<%for.j.header>,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header @@ -436,6 +443,7 @@ define void @a_i_jk_l(ptr %a) { ; CHECK-NEXT: Base offset: %a ; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. 
; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i.header>][{{\{\{}}0,+,1}<%for.j.header>,+,1}<%for.k.header>][{0,+,1}<%for.l>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i.header diff --git a/llvm/test/Analysis/Delinearization/himeno_1.ll b/llvm/test/Analysis/Delinearization/himeno_1.ll index 5ae5d04505b8c..8655a257d8b74 100644 --- a/llvm/test/Analysis/Delinearization/himeno_1.ll +++ b/llvm/test/Analysis/Delinearization/himeno_1.ll @@ -36,6 +36,7 @@ define void @jacobi(i32 %nn, ptr nocapture %a, ptr nocapture %p) nounwind uwtabl ; CHECK-NEXT: Base offset: %a.base ; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{1,+,1}<%for.i>][{1,+,1}<%for.j>][{1,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %p.rows.ptr = getelementptr inbounds %struct.Mat, ptr %p, i64 0, i32 2 diff --git a/llvm/test/Analysis/Delinearization/himeno_2.ll b/llvm/test/Analysis/Delinearization/himeno_2.ll index 75e4f027c4c6c..21a445eeaf841 100644 --- a/llvm/test/Analysis/Delinearization/himeno_2.ll +++ b/llvm/test/Analysis/Delinearization/himeno_2.ll @@ -36,6 +36,7 @@ define void @jacobi(i32 %nn, ptr nocapture %a, ptr nocapture %p) nounwind uwtabl ; CHECK-NEXT: Base offset: %a.base ; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes. 
; CHECK-NEXT: ArrayRef[{1,+,1}<%for.i>][{1,+,1}<%for.j>][{1,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %p.rows.ptr = getelementptr inbounds %struct.Mat, ptr %p, i64 0, i32 2 diff --git a/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll b/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll index fc0a6c4e8b952..da993fc35ce7c 100644 --- a/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll +++ b/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll @@ -16,6 +16,7 @@ define void @foo(i64 %n, i64 %m, i64 %b, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll index 0493a93dfee9d..c3d21de28fa65 100644 --- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll @@ -16,6 +16,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes. 
; CHECK-NEXT: ArrayRef[{3,+,1}<%for.i>][{-4,+,1}<%for.j>][{7,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll index 2e9c3d77f3281..96ea88df56a9f 100644 --- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll @@ -16,6 +16,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, i64 %p, ptr nocapture %A) nounwind uwta ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{3,+,1}<%for.cond4.preheader.lr.ph.us>][{-4,+,1}<%for.body6.lr.ph.us.us>][{7,+,1}<%for.body6.us.us>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %add = add nsw i64 %p, %o diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll index a31192ef72f04..4d95e2f117e6a 100644 --- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll @@ -16,6 +16,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, ptr %A, i64 %p, i64 %q, i64 %r) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes. 
; CHECK-NEXT: ArrayRef[{%p,+,1}<%for.i>][{%q,+,1}<%for.j>][{%r,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll index 432f7af7e0698..e1ad1c55313a4 100644 --- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll @@ -16,12 +16,14 @@ define void @foo(i64 %n, i64 %m, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>] +; CHECK-NEXT: Delinearization validation: Succeeded ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store double %val, ptr %arrayidx, align 8 ; CHECK-NEXT: AccessFunction: {{\{\{}}0,+,(8 * %m)}<%for.i>,+,8}<%for.j> ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll index 966a8222d8a15..d5213e5afb33c 100644 --- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll +++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll @@ -16,6 +16,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes. 
; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll index da40825984663..1dae34f785be4 100644 --- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll +++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll @@ -18,6 +18,7 @@ define void @foo(i32 %n, i32 %m, i32 %o, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][(zext i32 %m to i64)][(zext i32 %o to i64)] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: %m_zext = zext i32 %m to i64 diff --git a/llvm/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll b/llvm/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll index da77cd37fede5..011dc40697cb5 100644 --- a/llvm/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll +++ b/llvm/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll @@ -19,12 +19,14 @@ define void @foo(i64 %n, i64 %m, ptr %A) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%m] with elements of 8 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>] +; CHECK-NEXT: Delinearization validation: Succeeded ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store double 1.000000e+00, ptr %arrayidx1, align 8 ; CHECK-NEXT: AccessFunction: {{\{\{}}0,+,8}<%for.i>,+,(8 * %n)}<%for.j> ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%n] with elements of 8 bytes. 
; CHECK-NEXT: ArrayRef[{0,+,1}<%for.j>][{0,+,1}<%for.i>] +; CHECK-NEXT: Delinearization validation: Succeeded ; entry: br label %for.i diff --git a/llvm/test/Analysis/Delinearization/parameter_addrec_product.ll b/llvm/test/Analysis/Delinearization/parameter_addrec_product.ll index 49eeee3bd2119..4ef29701bdf49 100644 --- a/llvm/test/Analysis/Delinearization/parameter_addrec_product.ll +++ b/llvm/test/Analysis/Delinearization/parameter_addrec_product.ll @@ -19,12 +19,14 @@ define void @foo(ptr %A, ptr %p) { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%pval] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%bb2>][{0,+,1}<%bb4>] +; CHECK-NEXT: Delinearization validation: Failed ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store float %tmp12, ptr %tmp10, align 4 ; CHECK-NEXT: AccessFunction: (4 * (({0,+,1}<%bb2> * %pval) + {0,+,1}<%bb4>)) ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][%pval] with elements of 4 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%bb2>][{0,+,1}<%bb4>] +; CHECK-NEXT: Delinearization validation: Failed ; bb: br label %bb2 diff --git a/llvm/test/Analysis/Delinearization/terms_with_identity_factor.ll b/llvm/test/Analysis/Delinearization/terms_with_identity_factor.ll index 5b0465f7fb75e..323cce3dff05b 100644 --- a/llvm/test/Analysis/Delinearization/terms_with_identity_factor.ll +++ b/llvm/test/Analysis/Delinearization/terms_with_identity_factor.ll @@ -13,12 +13,14 @@ define void @foo(i32 %m, i32 %n, ptr nocapture %A) #0 { ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %n to i64)] with elements of 1 bytes. 
; CHECK-NEXT: ArrayRef[{0,+,1}<%for.body3.lr.ph.us>][{0,+,1}<%for.body3.us>] +; CHECK-NEXT: Delinearization validation: Failed ; CHECK-EMPTY: ; CHECK-NEXT: Inst: store i8 %add4.us, ptr %arrayidx.us, align 1 ; CHECK-NEXT: AccessFunction: {{\{\{}}0,+,(sext i32 %n to i64)}<%for.body3.lr.ph.us>,+,1}<%for.body3.us> ; CHECK-NEXT: Base offset: %A ; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %n to i64)] with elements of 1 bytes. ; CHECK-NEXT: ArrayRef[{0,+,1}<%for.body3.lr.ph.us>][{0,+,1}<%for.body3.us>] +; CHECK-NEXT: Delinearization validation: Failed ; entry: br label %entry.split diff --git a/llvm/test/Analysis/DependenceAnalysis/DifferentOffsets.ll b/llvm/test/Analysis/DependenceAnalysis/DifferentOffsets.ll index 069a540ea0295..91d127cfc09d6 100644 --- a/llvm/test/Analysis/DependenceAnalysis/DifferentOffsets.ll +++ b/llvm/test/Analysis/DependenceAnalysis/DifferentOffsets.ll @@ -34,8 +34,6 @@ define i32 @alias_with_parametric_offset(ptr nocapture %A, i64 %n) { ; CHECK-NEXT: Equal predicate: (zext i2 (trunc i64 %n to i2) to i64) == 0 ; CHECK-NEXT: Src: %0 = load i32, ptr %A, align 1 --> Dst: %0 = load i32, ptr %A, align 1 ; CHECK-NEXT: da analyze - none! -; CHECK-NEXT: Runtime Assumptions: -; CHECK-NEXT: Equal predicate: (zext i2 (trunc i64 %n to i2) to i64) == 0 ; entry: %arrayidx = getelementptr inbounds i8, ptr %A, i64 %n @@ -55,9 +53,6 @@ define i32 @alias_with_parametric_expr(ptr nocapture %A, i64 %n, i64 %m) { ; CHECK-NEXT: Equal predicate: (zext i2 (-2 + (trunc i64 %m to i2)) to i64) == 0 ; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx1, align 1 --> Dst: %0 = load i32, ptr %arrayidx1, align 1 ; CHECK-NEXT: da analyze - none! 
-; CHECK-NEXT: Runtime Assumptions: -; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 %m to i2) + (-2 * (trunc i64 %n to i2))) to i64) == 0 -; CHECK-NEXT: Equal predicate: (zext i2 (-2 + (trunc i64 %m to i2)) to i64) == 0 ; entry: %mul = mul nsw i64 %n, 10 @@ -81,8 +76,6 @@ define i32 @gep_i8_vs_i32(ptr nocapture %A, i64 %n, i64 %m) { ; CHECK-NEXT: Equal predicate: (zext i2 (trunc i64 %n to i2) to i64) == 0 ; CHECK-NEXT: Src: store i32 42, ptr %arrayidx1, align 4 --> Dst: store i32 42, ptr %arrayidx1, align 4 ; CHECK-NEXT: da analyze - none! -; CHECK-NEXT: Runtime Assumptions: -; CHECK-NEXT: Equal predicate: (zext i2 (trunc i64 %n to i2) to i64) == 0 ; entry: %arrayidx0 = getelementptr inbounds i8, ptr %A, i64 %n diff --git a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll index f5be89a51f484..bcf73683e4dab 100644 --- a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll +++ b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll @@ -50,9 +50,6 @@ define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) align 2 { ; CHECK-NEXT: Equal predicate: (8 * (zext i4 (trunc i32 %v1 to i4) to i32)) == 0 ; CHECK-NEXT: Src: %v32 = load <32 x i32>, ptr %v30, align 128 --> Dst: %v32 = load <32 x i32>, ptr %v30, align 128 ; CHECK-NEXT: da analyze - consistent input [0 S S]! 
-; CHECK-NEXT: Runtime Assumptions: -; CHECK-NEXT: Equal predicate: (zext i7 (4 * (trunc i32 %v1 to i7) * (1 + (trunc i32 %n to i7))) to i32) == 0 -; CHECK-NEXT: Equal predicate: (8 * (zext i4 (trunc i32 %v1 to i4) to i32)) == 0 ; entry: %v1 = load i32, ptr %B, align 4 diff --git a/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll b/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll new file mode 100644 index 0000000000000..e6b6d1753adb7 --- /dev/null +++ b/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll @@ -0,0 +1,33 @@ +; RUN: opt < %s -passes='print' -disable-output + +; Ensure no crash happens after PR #164798 + +target datalayout = "p21:32:16" + +define i16 @f() { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: + %i.02 = phi i16 [ 0, %entry ], [ %inc8, %for.cond.cleanup3 ] + %idxprom = zext i16 %i.02 to i32 + %arrayidx = getelementptr [18 x i16], ptr addrspace(21) null, i32 %idxprom + br label %for.body4 + +for.cond.cleanup: + ret i16 0 + +for.cond.cleanup3: + %inc8 = add i16 %i.02, 1 + %exitcond3.not = icmp eq i16 %inc8, 0 + br i1 %exitcond3.not, label %for.cond.cleanup, label %for.cond1.preheader + +for.body4: + %j.01 = phi i16 [ 0, %for.cond1.preheader ], [ %inc.2, %for.body4 ] + %idxprom5 = zext i16 %j.01 to i32 + %arrayidx6 = getelementptr i16, ptr addrspace(21) %arrayidx, i32 %idxprom5 + store i16 0, ptr addrspace(21) %arrayidx6, align 1 + %inc.2 = add i16 %j.01, 1 + %exitcond.not.2 = icmp eq i16 %inc.2, 18 + br i1 %exitcond.not.2, label %for.cond.cleanup3, label %for.body4 +} diff --git a/llvm/test/Analysis/ScalarEvolution/addrec-may-wrap-udiv-canonicalize.ll b/llvm/test/Analysis/ScalarEvolution/addrec-may-wrap-udiv-canonicalize.ll new file mode 100644 index 0000000000000..0a6ef0dad4569 --- /dev/null +++ b/llvm/test/Analysis/ScalarEvolution/addrec-may-wrap-udiv-canonicalize.ll @@ -0,0 +1,169 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; 
RUN: opt < %s -passes='print' -disable-output 2>&1 | FileCheck %s + +declare void @use(i64) + +define void @test_step2_div4(i64 %n) { +; CHECK-LABEL: 'test_step2_div4' +; CHECK-NEXT: Classifying expressions for: @test_step2_div4 +; CHECK-NEXT: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {0,+,2}<%loop> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.0 = udiv i64 %iv, 4 +; CHECK-NEXT: --> ({0,+,2}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.1 = add i64 %iv, 1 +; CHECK-NEXT: --> {1,+,2}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.1 = udiv i64 %iv.1, 4 +; CHECK-NEXT: --> ({1,+,2}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.2 = add i64 %iv, 2 +; CHECK-NEXT: --> {2,+,2}<%loop> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.2 = udiv i64 %iv.2, 4 +; CHECK-NEXT: --> ({2,+,2}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.neg.1 = add i64 %iv, -1 +; CHECK-NEXT: --> {-1,+,2}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.neg.1 = udiv i64 %iv.neg.1, 4 +; CHECK-NEXT: --> ({-1,+,2}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = add i64 %iv, 2 +; CHECK-NEXT: --> {2,+,2}<%loop> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_step2_div4 +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. 
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %div.0 = udiv i64 %iv, 4 + call void @use(i64 %div.0) + %iv.1 = add i64 %iv, 1 + %div.1 = udiv i64 %iv.1, 4 + call void @use(i64 %div.1) + %iv.2 = add i64 %iv, 2 + %div.2 = udiv i64 %iv.2, 4 + call void @use(i64 %div.2) + %iv.neg.1 = add i64 %iv, -1 + %div.neg.1 = udiv i64 %iv.neg.1, 4 + call void @use(i64 %div.neg.1) + %iv.next = add i64 %iv, 2 + %cond = icmp slt i64 %iv, %n + br i1 %cond, label %loop, label %exit + +exit: + ret void +} + +define void @test_step3_div6(i64 %n) { +; CHECK-LABEL: 'test_step3_div6' +; CHECK-NEXT: Classifying expressions for: @test_step3_div6 +; CHECK-NEXT: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {0,+,3}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.0 = udiv i64 %iv, 6 +; CHECK-NEXT: --> ({0,+,3}<%loop> /u 6) U: [0,3074457345618258603) S: [0,3074457345618258603) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.1 = add i64 %iv, 1 +; CHECK-NEXT: --> {1,+,3}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.1 = udiv i64 %iv.1, 6 +; CHECK-NEXT: --> ({1,+,3}<%loop> /u 6) U: [0,3074457345618258603) S: [0,3074457345618258603) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.2 = add i64 %iv, 2 +; CHECK-NEXT: --> {2,+,3}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.2 = udiv i64 %iv.2, 6 +; CHECK-NEXT: --> ({2,+,3}<%loop> /u 6) U: [0,3074457345618258603) S: [0,3074457345618258603) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.neg.1 = add i64 %iv, -1 +; CHECK-NEXT: --> {-1,+,3}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; 
CHECK-NEXT: %div.neg.1 = udiv i64 %iv.neg.1, 6 +; CHECK-NEXT: --> ({-1,+,3}<%loop> /u 6) U: [0,3074457345618258603) S: [0,3074457345618258603) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = add i64 %iv, 3 +; CHECK-NEXT: --> {3,+,3}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_step3_div6 +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %div.0 = udiv i64 %iv, 6 + call void @use(i64 %div.0) + %iv.1 = add i64 %iv, 1 + %div.1 = udiv i64 %iv.1, 6 + call void @use(i64 %div.1) + %iv.2 = add i64 %iv, 2 + %div.2 = udiv i64 %iv.2, 6 + call void @use(i64 %div.2) + %iv.neg.1 = add i64 %iv, -1 + %div.neg.1 = udiv i64 %iv.neg.1, 6 + call void @use(i64 %div.neg.1) + %iv.next = add i64 %iv, 3 + %cond = icmp slt i64 %iv, %n + br i1 %cond, label %loop, label %exit + +exit: + ret void +} + + +define void @test_step4_div4(i64 %n) { +; CHECK-LABEL: 'test_step4_div4' +; CHECK-NEXT: Classifying expressions for: @test_step4_div4 +; CHECK-NEXT: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {0,+,4}<%loop> U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.0 = udiv i64 %iv, 4 +; CHECK-NEXT: --> ({0,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.1 = add i64 %iv, 1 +; CHECK-NEXT: --> {1,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.1 = udiv i64 %iv.1, 4 +; CHECK-NEXT: --> ({1,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: 
{ %loop: Computable } +; CHECK-NEXT: %iv.2 = add i64 %iv, 2 +; CHECK-NEXT: --> {2,+,4}<%loop> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.2 = udiv i64 %iv.2, 4 +; CHECK-NEXT: --> ({2,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.3 = add i64 %iv, 3 +; CHECK-NEXT: --> {3,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.3 = udiv i64 %iv.3, 4 +; CHECK-NEXT: --> ({3,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.4 = add i64 %iv, 4 +; CHECK-NEXT: --> {4,+,4}<%loop> U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.4 = udiv i64 %iv.4, 4 +; CHECK-NEXT: --> ({4,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.5 = add i64 %iv, 5 +; CHECK-NEXT: --> {5,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %div.5 = udiv i64 %iv.5, 4 +; CHECK-NEXT: --> ({5,+,4}<%loop> /u 4) U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = add i64 %iv, 4 +; CHECK-NEXT: --> {4,+,4}<%loop> U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_step4_div4 +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. 
+; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %div.0 = udiv i64 %iv, 4 + call void @use(i64 %div.0) + %iv.1 = add i64 %iv, 1 + %div.1 = udiv i64 %iv.1, 4 + call void @use(i64 %div.1) + %iv.2 = add i64 %iv, 2 + %div.2 = udiv i64 %iv.2, 4 + call void @use(i64 %div.2) + %iv.3 = add i64 %iv, 3 + %div.3 = udiv i64 %iv.3, 4 + call void @use(i64 %div.3) + %iv.4 = add i64 %iv, 4 + %div.4 = udiv i64 %iv.4, 4 + call void @use(i64 %div.4) + %iv.5 = add i64 %iv, 5 + %div.5 = udiv i64 %iv.5, 4 + call void @use(i64 %div.5) + %iv.next = add i64 %iv, 4 + %cond = icmp slt i64 %iv, %n + br i1 %cond, label %loop, label %exit + +exit: + ret void +} diff --git a/llvm/test/Assembler/invalid-ptrauth-const6.ll b/llvm/test/Assembler/invalid-ptrauth-const6.ll new file mode 100644 index 0000000000000..6e8e1d386acc8 --- /dev/null +++ b/llvm/test/Assembler/invalid-ptrauth-const6.ll @@ -0,0 +1,6 @@ +; RUN: not llvm-as < %s 2>&1 | FileCheck %s + +@var = global i32 0 + +; CHECK: error: constant ptrauth deactivation symbol must be a pointer +@ptr = global ptr ptrauth (ptr @var, i32 0, i64 65535, ptr null, i64 0) diff --git a/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll new file mode 100644 index 0000000000000..237df28bb0266 --- /dev/null +++ b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll @@ -0,0 +1,111 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S < %s | FileCheck %s +; RUN: llvm-dis < %s.bc | FileCheck %s + +define @rev_nxv16i1( %a) { +; CHECK-LABEL: @rev_nxv16i1( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv16i1( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv16i1( %a) + ret %res +} + +define @rev_nxv8i1( %a) { +; CHECK-LABEL: @rev_nxv8i1( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv8i1( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv8i1( %a) + ret %res +} + +define 
@rev_nxv4i1( %a) { +; CHECK-LABEL: @rev_nxv4i1( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv4i1( %a) + ret %res +} + +define @rev_nxv2i1( %a) { +; CHECK-LABEL: @rev_nxv2i1( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv2i1( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv2i1( %a) + ret %res +} + +define @rev_i8( %a) { +; CHECK-LABEL: @rev_i8( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv16i8( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv16i8( %a) + ret %res +} + +define @rev_i16( %a) { +; CHECK-LABEL: @rev_i16( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv8i16( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv8i16( %a) + ret %res +} + +define @rev_i32( %a) { +; CHECK-LABEL: @rev_i32( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv4i32( %a) + ret %res +} + +define @rev_i64( %a) { +; CHECK-LABEL: @rev_i64( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv2i64( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv2i64( %a) + ret %res +} + +define @rev_f16( %a) { +; CHECK-LABEL: @rev_f16( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv8f16( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv8f16( %a) + ret %res +} + +define @rev_f32( %a) { +; CHECK-LABEL: @rev_f32( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv4f32( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv4f32( %a) + ret %res +} + +define @rev_f64( %a) { +; CHECK-LABEL: @rev_f64( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv2f64( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv2f64( %a) + ret %res +} + +define @rev_bf16( 
%a) #0 { +; CHECK-LABEL: @rev_bf16( +; CHECK-NEXT: [[RES:%.*]] = call @llvm.vector.reverse.nxv8bf16( [[A:%.*]]) +; CHECK-NEXT: ret [[RES]] +; + %res = call @llvm.aarch64.sve.rev.nxv8bf16( %a) + ret %res +} diff --git a/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll.bc b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll.bc new file mode 100644 index 0000000000000..fb8ba00e8039a Binary files /dev/null and b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll.bc differ diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll index e21786e5ee330..53cbe2d6ffd37 100644 --- a/llvm/test/Bitcode/compatibility.ll +++ b/llvm/test/Bitcode/compatibility.ll @@ -217,9 +217,13 @@ declare void @g.f1() ; CHECK: @g.sanitize_address_dyninit = global i32 0, sanitize_address_dyninit ; CHECK: @g.sanitize_multiple = global i32 0, sanitize_memtag, sanitize_address_dyninit +@ds = external global i32 + ; ptrauth constant @auth_var = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null) ; CHECK: @auth_var = global ptr ptrauth (ptr @g1, i32 0, i64 65535) +@auth_var.ds = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null, ptr @ds) +; CHECK: @auth_var.ds = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null, ptr @ds) ;; Aliases ; Format: @ = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal] diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll index 5628e17b4936e..01e5b3f6673ae 100644 --- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll +++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: @setcc_v2i1_v2i64(<2 x i64> %x) { +; CHECK-SD-LABEL: setcc_v2i1_v2i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.2d, v0.2d, v0.2d +; CHECK-SD-NEXT: xtn v0.2s, v0.2d +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v2i1_v2i64: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.2d, v0.2d, v0.2d +; CHECK-CSSC-NEXT: xtn v0.2s, v0.2d +; CHECK-CSSC-NEXT: ret 
+; +entry: + %cmp = icmp ne <2 x i64> %x, zeroinitializer + ret <2 x i1> %cmp +} + +define <4 x i1> @setcc_v4i1_v4i32(<4 x i32> %x) { +; CHECK-SD-LABEL: setcc_v4i1_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.4s, v0.4s, v0.4s +; CHECK-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v4i1_v4i32: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.4s, v0.4s, v0.4s +; CHECK-CSSC-NEXT: xtn v0.4h, v0.4s +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <4 x i32> %x, zeroinitializer + ret <4 x i1> %cmp +} + +define <8 x i1> @setcc_v8i1_v8i16(<8 x i16> %x) { +; CHECK-SD-LABEL: setcc_v8i1_v8i16: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.8h, v0.8h, v0.8h +; CHECK-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v8i1_v8i16: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.8h, v0.8h, v0.8h +; CHECK-CSSC-NEXT: xtn v0.8b, v0.8h +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <8 x i16> %x, zeroinitializer + ret <8 x i1> %cmp +} + +define <16 x i1> @setcc_v16i1_v16i8(<16 x i8> %x) { +; CHECK-SD-LABEL: setcc_v16i1_v16i8: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.16b, v0.16b, v0.16b +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v16i1_v16i8: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.16b, v0.16b, v0.16b +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <16 x i8> %x, zeroinitializer + ret <16 x i1> %cmp +} + +define <2 x i8> @setcc_v2i8_v2i64(<2 x i64> %x) { +; CHECK-SD-LABEL: setcc_v2i8_v2i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.2d, v0.2d, v0.2d +; CHECK-SD-NEXT: movi v1.2s, #1 +; CHECK-SD-NEXT: xtn v0.2s, v0.2d +; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v2i8_v2i64: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.2d, v0.2d, v0.2d +; CHECK-CSSC-NEXT: movi v1.2s, #1 +; CHECK-CSSC-NEXT: xtn v0.2s, v0.2d +; 
CHECK-CSSC-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <2 x i64> %x, zeroinitializer + %conv = zext <2 x i1> %cmp to <2 x i8> + ret <2 x i8> %conv +} + +define <4 x i16> @setcc_v4i16_v4i32(<4 x i32> %x) { +; CHECK-SD-LABEL: setcc_v4i16_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmtst v0.4s, v0.4s, v0.4s +; CHECK-SD-NEXT: movi v1.4h, #1 +; CHECK-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v4i16_v4i32: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: cmtst v0.4s, v0.4s, v0.4s +; CHECK-CSSC-NEXT: movi v1.4h, #1 +; CHECK-CSSC-NEXT: xtn v0.4h, v0.4s +; CHECK-CSSC-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <4 x i32> %x, zeroinitializer + %conv = zext <4 x i1> %cmp to <4 x i16> + ret <4 x i16> %conv +} + +define <4 x i32> @setcc_v4i32_v4i32(<4 x i32> %x) { +; CHECK-SD-LABEL: setcc_v4i32_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: movi v1.4s, #1 +; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0 +; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: setcc_v4i32_v4i32: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: movi v1.4s, #1 +; CHECK-CSSC-NEXT: cmeq v0.4s, v0.4s, #0 +; CHECK-CSSC-NEXT: bic v0.16b, v1.16b, v0.16b +; CHECK-CSSC-NEXT: ret +; +entry: + %cmp = icmp ne <4 x i32> %x, zeroinitializer + %conv = zext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %conv +} + +; auto icmpi128(int128 x0) { return x0 != 0; } +define i1 @icmpi128(i128 noundef %0) { +; CHECK-SD-LABEL: icmpi128: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: orr x8, x0, x1 +; CHECK-SD-NEXT: cmp x8, #0 +; CHECK-SD-NEXT: cset w0, ne +; CHECK-SD-NEXT: ret +; +; CHECK-CSSC-LABEL: icmpi128: +; CHECK-CSSC: // %bb.0: // %entry +; CHECK-CSSC-NEXT: orr x8, x0, x1 +; CHECK-CSSC-NEXT: umin x0, x8, #1 +; CHECK-CSSC-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-CSSC-NEXT: ret +; +entry: + 
%2 = icmp ne i128 %0, 0 + ret i1 %2 +} diff --git a/llvm/test/CodeGen/AArch64/addtruncshift.ll b/llvm/test/CodeGen/AArch64/addtruncshift.ll new file mode 100644 index 0000000000000..6dbe0b3d80b9a --- /dev/null +++ b/llvm/test/CodeGen/AArch64/addtruncshift.ll @@ -0,0 +1,114 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +define <2 x i32> @test_v2i64(<2 x i64> %n) { +; CHECK-SD-LABEL: test_v2i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-SD-NEXT: xtn v0.2s, v0.2d +; CHECK-SD-NEXT: usra v0.2s, v0.2s, #31 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v2i64: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.2d, v0.2d, #63 +; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-GI-NEXT: xtn v1.2s, v1.2d +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 63) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 35) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + +define <4 x i16> @test_v4i32(<4 x i32> %n) { +; CHECK-SD-LABEL: test_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #17 +; CHECK-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-NEXT: usra v0.4h, v0.4h, #15 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v4i32: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.4s, v0.4s, #31 +; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #17 +; CHECK-GI-NEXT: xtn v1.4h, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: add v0.4h, v1.4h, v0.4h +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <4 x i32> %n, splat (i32 31) + %vmovn.i4 = trunc nuw nsw <4 x i32> %shr to <4 x i16> + 
%shr1 = ashr <4 x i32> %n, splat (i32 17) + %vmovn.i = trunc nsw <4 x i32> %shr1 to <4 x i16> + %add = add nsw <4 x i16> %vmovn.i4, %vmovn.i + ret <4 x i16> %add +} + +define <8 x i8> @test_v8i16(<8 x i16> %n) { +; CHECK-SD-LABEL: test_v8i16: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.8h, v0.8h, #9 +; CHECK-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-NEXT: usra v0.8b, v0.8b, #7 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v8i16: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.8h, v0.8h, #15 +; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #9 +; CHECK-GI-NEXT: xtn v1.8b, v1.8h +; CHECK-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-NEXT: add v0.8b, v1.8b, v0.8b +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <8 x i16> %n, splat (i16 15) + %vmovn.i4 = trunc nuw nsw <8 x i16> %shr to <8 x i8> + %shr1 = ashr <8 x i16> %n, splat (i16 9) + %vmovn.i = trunc nsw <8 x i16> %shr1 to <8 x i8> + %add = add nsw <8 x i8> %vmovn.i4, %vmovn.i + ret <8 x i8> %add +} + +define <2 x i32> @test_v2i64_smallsrl(<2 x i64> %n) { +; CHECK-LABEL: test_v2i64_smallsrl: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ushr v1.2d, v0.2d, #62 +; CHECK-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: xtn v0.2s, v0.2d +; CHECK-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 62) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 35) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + +define <2 x i32> @test_v2i64_smallsra(<2 x i64> %n) { +; CHECK-LABEL: test_v2i64_smallsra: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ushr v1.2d, v0.2d, #63 +; CHECK-NEXT: shrn v0.2s, v0.2d, #27 +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 63) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 
27) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll index 8dd5c3ac05109..498dce138febf 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll @@ -1197,30 +1197,22 @@ define <2 x half> @vec_round_f16(<2 x fp128> %val) { ; ; CHECK-GI-LABEL: vec_round_f16: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #32] // 8-byte Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 ; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: mov v2.d[0], x8 ; CHECK-GI-NEXT: str q1, [sp] // 16-byte Spill -; CHECK-GI-NEXT: mov v2.d[1], x8 -; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Spill ; CHECK-GI-NEXT: bl __trunctfhf2 ; CHECK-GI-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Spill ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Reload ; CHECK-GI-NEXT: bl __trunctfhf2 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Reload ; CHECK-GI-NEXT: // kill: def $h0 killed $h0 def $q0 -; CHECK-GI-NEXT: str q0, [sp] // 16-byte Spill -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Reload -; CHECK-GI-NEXT: bl __trunctfhf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Reload -; CHECK-GI-NEXT: bl __trunctfhf2 -; CHECK-GI-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload -; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Reload -; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ldr x30, [sp, #32] // 8-byte Reload +; CHECK-GI-NEXT: mov v1.h[1], v0.h[0] +; CHECK-GI-NEXT: fmov d0, d1 +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: ret %dst = fptrunc <2 x fp128> %val to <2 x half> ret <2 x half> %dst 
diff --git a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll index d06e42f5405ef..3d6cc814d157d 100644 --- a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll @@ -414,8 +414,8 @@ define i1 @ctpop32_ne_one_nonzero(i32 %x) { ; CHECK-CSSC-LABEL: ctpop32_ne_one_nonzero: ; CHECK-CSSC: // %bb.0: // %entry ; CHECK-CSSC-NEXT: sub w8, w0, #1 -; CHECK-CSSC-NEXT: tst w0, w8 -; CHECK-CSSC-NEXT: cset w0, ne +; CHECK-CSSC-NEXT: and w8, w0, w8 +; CHECK-CSSC-NEXT: umin w0, w8, #1 ; CHECK-CSSC-NEXT: ret ; ; CHECK-BE-LABEL: ctpop32_ne_one_nonzero: diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll index 9b06620590cda..44411a1032dca 100644 --- a/llvm/test/CodeGen/AArch64/arm64-zip.ll +++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll @@ -355,49 +355,25 @@ define <8 x i16> @combine_v8i16_undef(<4 x i16> %0, <4 x i16> %1) { ret <8 x i16> %3 } -; FIXME: This could be zip1 too, 8,0,9,1... pattern is handled define <16 x i8> @combine_v8i16_8first(<8 x i8> %0, <8 x i8> %1) { -; CHECK-SD-LABEL: combine_v8i16_8first: -; CHECK-SD: // %bb.0: -; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1_q2 -; CHECK-SD-NEXT: adrp x8, .LCPI25_0 -; CHECK-SD-NEXT: fmov d2, d0 -; CHECK-SD-NEXT: ldr q3, [x8, :lo12:.LCPI25_0] -; CHECK-SD-NEXT: tbl.16b v0, { v1, v2 }, v3 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: combine_v8i16_8first: -; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q31_q0 -; CHECK-GI-NEXT: adrp x8, .LCPI25_0 -; CHECK-GI-NEXT: fmov d31, d1 -; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI25_0] -; CHECK-GI-NEXT: tbl.16b v0, { v31, v0 }, v2 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: combine_v8i16_8first: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-NEXT: zip1.16b v0, v0, v1 +; CHECK-NEXT: ret %3 = shufflevector <8 x i8> %1, <8 x i8> %0, <16 x i32> ret <16 x i8> %3 } -; FIXME: This 
could be zip1 too, 8,0,9,1... pattern is handled define <16 x i8> @combine_v8i16_8firstundef(<8 x i8> %0, <8 x i8> %1) { -; CHECK-SD-LABEL: combine_v8i16_8firstundef: -; CHECK-SD: // %bb.0: -; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1_q2 -; CHECK-SD-NEXT: adrp x8, .LCPI26_0 -; CHECK-SD-NEXT: fmov d2, d0 -; CHECK-SD-NEXT: ldr q3, [x8, :lo12:.LCPI26_0] -; CHECK-SD-NEXT: tbl.16b v0, { v1, v2 }, v3 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: combine_v8i16_8firstundef: -; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q31_q0 -; CHECK-GI-NEXT: adrp x8, .LCPI26_0 -; CHECK-GI-NEXT: fmov d31, d1 -; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI26_0] -; CHECK-GI-NEXT: tbl.16b v0, { v31, v0 }, v2 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: combine_v8i16_8firstundef: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-NEXT: zip1.16b v0, v0, v1 +; CHECK-NEXT: ret %3 = shufflevector <8 x i8> %1, <8 x i8> %0, <16 x i32> ret <16 x i8> %3 } diff --git a/llvm/test/CodeGen/AArch64/deactivation-symbols.ll b/llvm/test/CodeGen/AArch64/deactivation-symbols.ll new file mode 100644 index 0000000000000..571b1067134b8 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/deactivation-symbols.ll @@ -0,0 +1,73 @@ +; RUN: llc < %s -O0 -mtriple=aarch64-none-linux-gnu -mattr=+pauth | FileCheck --check-prefixes=CHECK,O0 %s +; RUN: llc < %s -O2 -mtriple=aarch64-none-linux-gnu -mattr=+pauth | FileCheck --check-prefixes=CHECK,O2 %s + +@ds = external global i8 + +declare void @f(ptr %p) + +; CHECK: call: +define void @call(ptr %p) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: bl f + notail call void @f(ptr %p) [ "deactivation-symbol"(ptr @ds) ] + ret void +} + +; CHECK: pauth_sign_zero: +define i64 @pauth_sign_zero(i64 %p) { + ; O0: mov x8, xzr + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; O0-NEXT: pacia x0, x8 + 
; O2-NEXT: paciza x0 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_sign_const: +define i64 @pauth_sign_const(i64 %p) { + ; CHECK: mov x16, #12345 + ; CHECK-NEXT: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: pacia x0, x16 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 12345) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_sign: +define i64 @pauth_sign(i64 %p, i64 %d) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: pacia x0, x1 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 %d) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_auth_zero: +define i64 @pauth_auth_zero(i64 %p) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autiza x0 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +; CHECK: pauth_auth_const: +define i64 @pauth_auth_const(i64 %p) { + ; CHECK: mov x8, #12345 + ; CHECK-NEXT: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autia x0, x8 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 12345) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +; CHECK: pauth_auth: +define i64 @pauth_auth(i64 %p, i64 %d) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autia x0, x1 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 %d) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} diff --git a/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll b/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll new file mode 100644 index 0000000000000..e6344b9eb89dc --- /dev/null +++ b/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll @@ -0,0 +1,936 @@ +; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mattr=+sve,+bf16 < %s | FileCheck %s --check-prefixes=CHECK,NOB16B16 +; RUN: llc -mattr=+sve,+bf16,+sve-b16b16 < %s | FileCheck %s --check-prefixes=CHECK,B16B16 + +target triple = "aarch64-unknown-linux-gnu" + +; +; FABS +; + +define <4 x bfloat> @fabs_v4bf16(<4 x bfloat> %a) { +; CHECK-LABEL: fabs_v4bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: bic v0.4h, #128, lsl #8 +; CHECK-NEXT: ret + %res = call <4 x bfloat> @llvm.fabs.v4bf16(<4 x bfloat> %a) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fabs_v8bf16(<8 x bfloat> %a) { +; CHECK-LABEL: fabs_v8bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: bic v0.8h, #128, lsl #8 +; CHECK-NEXT: ret + %res = call <8 x bfloat> @llvm.fabs.v8bf16(<8 x bfloat> %a) + ret <8 x bfloat> %res +} + +; +; FADD +; + +define <4 x bfloat> @fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fadd_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fadd v0.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v0.4s +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fadd_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfadd z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = fadd <4 x bfloat> %a, %b + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fadd_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v2.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v0.4h, #16 +; NOB16B16-NEXT: shll2 v1.4s, v1.8h, #16 +; NOB16B16-NEXT: shll2 v0.4s, v0.8h, #16 +; NOB16B16-NEXT: fadd v2.4s, v3.4s, v2.4s +; NOB16B16-NEXT: fadd v1.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v2.4s +; NOB16B16-NEXT: bfcvtn2 v0.8h, v1.4s +; NOB16B16-NEXT: ret +; 
+; B16B16-LABEL: fadd_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfadd z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = fadd <8 x bfloat> %a, %b + ret <8 x bfloat> %res +} + +; +; FDIV +; + +define <4 x bfloat> @fdiv_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; CHECK-LABEL: fdiv_v4bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: shll v1.4s, v1.4h, #16 +; CHECK-NEXT: shll v0.4s, v0.4h, #16 +; CHECK-NEXT: fdiv v0.4s, v0.4s, v1.4s +; CHECK-NEXT: bfcvtn v0.4h, v0.4s +; CHECK-NEXT: ret + %res = fdiv <4 x bfloat> %a, %b + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fdiv_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; CHECK-LABEL: fdiv_v8bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: shll v2.4s, v1.4h, #16 +; CHECK-NEXT: shll v3.4s, v0.4h, #16 +; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 +; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 +; CHECK-NEXT: fdiv v2.4s, v3.4s, v2.4s +; CHECK-NEXT: fdiv v1.4s, v0.4s, v1.4s +; CHECK-NEXT: bfcvtn v0.4h, v2.4s +; CHECK-NEXT: bfcvtn2 v0.8h, v1.4s +; CHECK-NEXT: ret + %res = fdiv <8 x bfloat> %a, %b + ret <8 x bfloat> %res +} + +; +; FMAX +; + +define <4 x bfloat> @fmax_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fmax_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: // kill: def $d1 killed $d1 def $q1 +; NOB16B16-NEXT: // kill: def $d0 killed $d0 def $q0 +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: mov h4, v1.h[2] +; NOB16B16-NEXT: shll v5.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v6.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h1, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmax s2, s3, s2 +; NOB16B16-NEXT: fmax s3, s6, s5 +; NOB16B16-NEXT: shll 
v5.4s, v7.4h, #16 +; NOB16B16-NEXT: mov h6, v0.h[3] +; NOB16B16-NEXT: fmax s4, s5, s4 +; NOB16B16-NEXT: bfcvt h2, s2 +; NOB16B16-NEXT: bfcvt h0, s3 +; NOB16B16-NEXT: shll v3.4s, v6.4h, #16 +; NOB16B16-NEXT: mov v0.h[1], v2.h[0] +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmax s1, s3, s1 +; NOB16B16-NEXT: mov v0.h[2], v2.h[0] +; NOB16B16-NEXT: bfcvt h1, s1 +; NOB16B16-NEXT: mov v0.h[3], v1.h[0] +; NOB16B16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmax_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfmax z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = call <4 x bfloat> @llvm.maximum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fmax_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fmax_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: shll v4.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h6, v1.h[2] +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h16, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: fmax s4, s5, s4 +; NOB16B16-NEXT: mov h5, v0.h[3] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: fmax s3, s3, s2 +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmax s4, s7, s6 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h7, v1.h[4] +; NOB16B16-NEXT: mov h16, v0.h[4] +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: fmax s5, s5, s6 +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: mov h6, v0.h[5] +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v16.4h, #16 +; NOB16B16-NEXT: mov 
v2.h[1], v3.h[0] +; NOB16B16-NEXT: mov h3, v1.h[5] +; NOB16B16-NEXT: bfcvt h5, s5 +; NOB16B16-NEXT: fmax s7, s16, s7 +; NOB16B16-NEXT: mov h16, v0.h[6] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: mov h0, v0.h[7] +; NOB16B16-NEXT: mov v2.h[2], v4.h[0] +; NOB16B16-NEXT: mov h4, v1.h[6] +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: mov h1, v1.h[7] +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fmax s3, s6, s3 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[3], v5.h[0] +; NOB16B16-NEXT: bfcvt h5, s7 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmax s4, s6, s4 +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: mov v2.h[4], v5.h[0] +; NOB16B16-NEXT: fmax s0, s0, s1 +; NOB16B16-NEXT: mov v2.h[5], v3.h[0] +; NOB16B16-NEXT: bfcvt h3, s4 +; NOB16B16-NEXT: bfcvt h0, s0 +; NOB16B16-NEXT: mov v2.h[6], v3.h[0] +; NOB16B16-NEXT: mov v2.h[7], v0.h[0] +; NOB16B16-NEXT: mov v0.16b, v2.16b +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmax_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfmax z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = call <8 x bfloat> @llvm.maximum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) + ret <8 x bfloat> %res +} + +; +; FMAXNM +; + +define <4 x bfloat> @fmaxnm_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fmaxnm_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: // kill: def $d1 killed $d1 def $q1 +; NOB16B16-NEXT: // kill: def $d0 killed $d0 def $q0 +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: mov h4, v1.h[2] +; NOB16B16-NEXT: shll v5.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v6.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h1, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; 
NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmaxnm s2, s3, s2 +; NOB16B16-NEXT: fmaxnm s3, s6, s5 +; NOB16B16-NEXT: shll v5.4s, v7.4h, #16 +; NOB16B16-NEXT: mov h6, v0.h[3] +; NOB16B16-NEXT: fmaxnm s4, s5, s4 +; NOB16B16-NEXT: bfcvt h2, s2 +; NOB16B16-NEXT: bfcvt h0, s3 +; NOB16B16-NEXT: shll v3.4s, v6.4h, #16 +; NOB16B16-NEXT: mov v0.h[1], v2.h[0] +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmaxnm s1, s3, s1 +; NOB16B16-NEXT: mov v0.h[2], v2.h[0] +; NOB16B16-NEXT: bfcvt h1, s1 +; NOB16B16-NEXT: mov v0.h[3], v1.h[0] +; NOB16B16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmaxnm_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = call <4 x bfloat> @llvm.maxnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fmaxnm_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fmaxnm_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: shll v4.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h6, v1.h[2] +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h16, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: fmaxnm s4, s5, s4 +; NOB16B16-NEXT: mov h5, v0.h[3] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: fmaxnm s3, s3, s2 +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmaxnm s4, s7, s6 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h7, v1.h[4] +; NOB16B16-NEXT: mov h16, v0.h[4] +; 
NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: fmaxnm s5, s5, s6 +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: mov h6, v0.h[5] +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[1], v3.h[0] +; NOB16B16-NEXT: mov h3, v1.h[5] +; NOB16B16-NEXT: bfcvt h5, s5 +; NOB16B16-NEXT: fmaxnm s7, s16, s7 +; NOB16B16-NEXT: mov h16, v0.h[6] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: mov h0, v0.h[7] +; NOB16B16-NEXT: mov v2.h[2], v4.h[0] +; NOB16B16-NEXT: mov h4, v1.h[6] +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: mov h1, v1.h[7] +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fmaxnm s3, s6, s3 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[3], v5.h[0] +; NOB16B16-NEXT: bfcvt h5, s7 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmaxnm s4, s6, s4 +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: mov v2.h[4], v5.h[0] +; NOB16B16-NEXT: fmaxnm s0, s0, s1 +; NOB16B16-NEXT: mov v2.h[5], v3.h[0] +; NOB16B16-NEXT: bfcvt h3, s4 +; NOB16B16-NEXT: bfcvt h0, s0 +; NOB16B16-NEXT: mov v2.h[6], v3.h[0] +; NOB16B16-NEXT: mov v2.h[7], v0.h[0] +; NOB16B16-NEXT: mov v0.16b, v2.16b +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmaxnm_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = call <8 x bfloat> @llvm.maxnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) + ret <8 x bfloat> %res +} + +; +; FMIN +; + +define <4 x bfloat> @fmin_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fmin_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: // kill: def $d1 killed $d1 def $q1 +; NOB16B16-NEXT: // kill: def $d0 killed $d0 def $q0 +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, 
v0.h[1] +; NOB16B16-NEXT: mov h4, v1.h[2] +; NOB16B16-NEXT: shll v5.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v6.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h1, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmin s2, s3, s2 +; NOB16B16-NEXT: fmin s3, s6, s5 +; NOB16B16-NEXT: shll v5.4s, v7.4h, #16 +; NOB16B16-NEXT: mov h6, v0.h[3] +; NOB16B16-NEXT: fmin s4, s5, s4 +; NOB16B16-NEXT: bfcvt h2, s2 +; NOB16B16-NEXT: bfcvt h0, s3 +; NOB16B16-NEXT: shll v3.4s, v6.4h, #16 +; NOB16B16-NEXT: mov v0.h[1], v2.h[0] +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmin s1, s3, s1 +; NOB16B16-NEXT: mov v0.h[2], v2.h[0] +; NOB16B16-NEXT: bfcvt h1, s1 +; NOB16B16-NEXT: mov v0.h[3], v1.h[0] +; NOB16B16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmin_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfmin z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = call <4 x bfloat> @llvm.minimum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fmin_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fmin_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: shll v4.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h6, v1.h[2] +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h16, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: fmin s4, s5, s4 +; NOB16B16-NEXT: mov h5, v0.h[3] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: fmin s3, s3, s2 +; 
NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fmin s4, s7, s6 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h7, v1.h[4] +; NOB16B16-NEXT: mov h16, v0.h[4] +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: fmin s5, s5, s6 +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: mov h6, v0.h[5] +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[1], v3.h[0] +; NOB16B16-NEXT: mov h3, v1.h[5] +; NOB16B16-NEXT: bfcvt h5, s5 +; NOB16B16-NEXT: fmin s7, s16, s7 +; NOB16B16-NEXT: mov h16, v0.h[6] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: mov h0, v0.h[7] +; NOB16B16-NEXT: mov v2.h[2], v4.h[0] +; NOB16B16-NEXT: mov h4, v1.h[6] +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: mov h1, v1.h[7] +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fmin s3, s6, s3 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[3], v5.h[0] +; NOB16B16-NEXT: bfcvt h5, s7 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmin s4, s6, s4 +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: mov v2.h[4], v5.h[0] +; NOB16B16-NEXT: fmin s0, s0, s1 +; NOB16B16-NEXT: mov v2.h[5], v3.h[0] +; NOB16B16-NEXT: bfcvt h3, s4 +; NOB16B16-NEXT: bfcvt h0, s0 +; NOB16B16-NEXT: mov v2.h[6], v3.h[0] +; NOB16B16-NEXT: mov v2.h[7], v0.h[0] +; NOB16B16-NEXT: mov v0.16b, v2.16b +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmin_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfmin z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = call <8 x bfloat> @llvm.minimum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) + ret <8 x bfloat> %res +} + +; +; FMINNM +; + +define <4 x bfloat> @fminnm_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: 
fminnm_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: // kill: def $d1 killed $d1 def $q1 +; NOB16B16-NEXT: // kill: def $d0 killed $d0 def $q0 +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: mov h4, v1.h[2] +; NOB16B16-NEXT: shll v5.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v6.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h1, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fminnm s2, s3, s2 +; NOB16B16-NEXT: fminnm s3, s6, s5 +; NOB16B16-NEXT: shll v5.4s, v7.4h, #16 +; NOB16B16-NEXT: mov h6, v0.h[3] +; NOB16B16-NEXT: fminnm s4, s5, s4 +; NOB16B16-NEXT: bfcvt h2, s2 +; NOB16B16-NEXT: bfcvt h0, s3 +; NOB16B16-NEXT: shll v3.4s, v6.4h, #16 +; NOB16B16-NEXT: mov v0.h[1], v2.h[0] +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fminnm s1, s3, s1 +; NOB16B16-NEXT: mov v0.h[2], v2.h[0] +; NOB16B16-NEXT: bfcvt h1, s1 +; NOB16B16-NEXT: mov v0.h[3], v1.h[0] +; NOB16B16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fminnm_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = call <4 x bfloat> @llvm.minnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fminnm_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fminnm_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: mov h2, v1.h[1] +; NOB16B16-NEXT: mov h3, v0.h[1] +; NOB16B16-NEXT: shll v4.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h6, v1.h[2] +; NOB16B16-NEXT: mov h7, v0.h[2] +; NOB16B16-NEXT: mov h16, v1.h[3] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; 
NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: fminnm s4, s5, s4 +; NOB16B16-NEXT: mov h5, v0.h[3] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: fminnm s3, s3, s2 +; NOB16B16-NEXT: bfcvt h2, s4 +; NOB16B16-NEXT: fminnm s4, s7, s6 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h7, v1.h[4] +; NOB16B16-NEXT: mov h16, v0.h[4] +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: fminnm s5, s5, s6 +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: mov h6, v0.h[5] +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[1], v3.h[0] +; NOB16B16-NEXT: mov h3, v1.h[5] +; NOB16B16-NEXT: bfcvt h5, s5 +; NOB16B16-NEXT: fminnm s7, s16, s7 +; NOB16B16-NEXT: mov h16, v0.h[6] +; NOB16B16-NEXT: shll v6.4s, v6.4h, #16 +; NOB16B16-NEXT: mov h0, v0.h[7] +; NOB16B16-NEXT: mov v2.h[2], v4.h[0] +; NOB16B16-NEXT: mov h4, v1.h[6] +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: mov h1, v1.h[7] +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fminnm s3, s6, s3 +; NOB16B16-NEXT: shll v6.4s, v16.4h, #16 +; NOB16B16-NEXT: mov v2.h[3], v5.h[0] +; NOB16B16-NEXT: bfcvt h5, s7 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fminnm s4, s6, s4 +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: mov v2.h[4], v5.h[0] +; NOB16B16-NEXT: fminnm s0, s0, s1 +; NOB16B16-NEXT: mov v2.h[5], v3.h[0] +; NOB16B16-NEXT: bfcvt h3, s4 +; NOB16B16-NEXT: bfcvt h0, s0 +; NOB16B16-NEXT: mov v2.h[6], v3.h[0] +; NOB16B16-NEXT: mov v2.h[7], v0.h[0] +; NOB16B16-NEXT: mov v0.16b, v2.16b +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fminnm_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 
killed $z0 +; B16B16-NEXT: ret + %res = call <8 x bfloat> @llvm.minnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) + ret <8 x bfloat> %res +} + +; +; FMLA +; + +define <4 x bfloat> @fmla_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) { +; NOB16B16-LABEL: fmla_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: // kill: def $d2 killed $d2 def $q2 +; NOB16B16-NEXT: // kill: def $d1 killed $d1 def $q1 +; NOB16B16-NEXT: // kill: def $d0 killed $d0 def $q0 +; NOB16B16-NEXT: mov h3, v2.h[1] +; NOB16B16-NEXT: mov h4, v1.h[1] +; NOB16B16-NEXT: mov h5, v0.h[1] +; NOB16B16-NEXT: shll v6.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h17, v2.h[2] +; NOB16B16-NEXT: mov h18, v1.h[2] +; NOB16B16-NEXT: mov h19, v0.h[2] +; NOB16B16-NEXT: mov h2, v2.h[3] +; NOB16B16-NEXT: mov h1, v1.h[3] +; NOB16B16-NEXT: fmadd s6, s16, s7, s6 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h16, v0.h[3] +; NOB16B16-NEXT: shll v7.4s, v19.4h, #16 +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmadd s3, s5, s4, s3 +; NOB16B16-NEXT: shll v4.4s, v17.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v18.4h, #16 +; NOB16B16-NEXT: bfcvt h0, s6 +; NOB16B16-NEXT: fmadd s4, s7, s5, s4 +; NOB16B16-NEXT: shll v5.4s, v16.4h, #16 +; NOB16B16-NEXT: bfcvt h3, s3 +; NOB16B16-NEXT: fmadd s1, s5, s1, s2 +; NOB16B16-NEXT: mov v0.h[1], v3.h[0] +; NOB16B16-NEXT: bfcvt h3, s4 +; NOB16B16-NEXT: bfcvt h1, s1 +; NOB16B16-NEXT: mov v0.h[2], v3.h[0] +; NOB16B16-NEXT: mov v0.h[3], v1.h[0] +; NOB16B16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmla_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d2 killed $d2 def $z2 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; 
B16B16-NEXT: bfmla z0.h, p0/m, z1.h, z2.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fmla_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { +; NOB16B16-LABEL: fmla_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: mov h3, v2.h[1] +; NOB16B16-NEXT: mov h4, v1.h[1] +; NOB16B16-NEXT: mov h5, v0.h[1] +; NOB16B16-NEXT: shll v6.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v7.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v0.4h, #16 +; NOB16B16-NEXT: mov h17, v2.h[2] +; NOB16B16-NEXT: mov h18, v1.h[2] +; NOB16B16-NEXT: mov h19, v0.h[2] +; NOB16B16-NEXT: mov h20, v2.h[3] +; NOB16B16-NEXT: mov h21, v1.h[3] +; NOB16B16-NEXT: fmadd s6, s16, s7, s6 +; NOB16B16-NEXT: shll v3.4s, v3.4h, #16 +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: mov h7, v0.h[3] +; NOB16B16-NEXT: shll v16.4s, v19.4h, #16 +; NOB16B16-NEXT: mov h19, v0.h[4] +; NOB16B16-NEXT: fmadd s4, s5, s4, s3 +; NOB16B16-NEXT: shll v3.4s, v17.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v18.4h, #16 +; NOB16B16-NEXT: mov h17, v2.h[4] +; NOB16B16-NEXT: mov h18, v1.h[4] +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: shll v19.4s, v19.4h, #16 +; NOB16B16-NEXT: fmadd s5, s16, s5, s3 +; NOB16B16-NEXT: bfcvt h3, s6 +; NOB16B16-NEXT: shll v6.4s, v20.4h, #16 +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: shll v16.4s, v21.4h, #16 +; NOB16B16-NEXT: shll v17.4s, v17.4h, #16 +; NOB16B16-NEXT: shll v18.4s, v18.4h, #16 +; NOB16B16-NEXT: fmadd s6, s7, s16, s6 +; NOB16B16-NEXT: bfcvt h5, s5 +; NOB16B16-NEXT: mov h7, v1.h[5] +; NOB16B16-NEXT: mov v3.h[1], v4.h[0] +; NOB16B16-NEXT: mov h4, v2.h[5] +; NOB16B16-NEXT: mov h16, v0.h[5] +; NOB16B16-NEXT: fmadd s17, s19, s18, s17 +; NOB16B16-NEXT: mov h18, v2.h[6] +; NOB16B16-NEXT: mov h19, v1.h[6] +; NOB16B16-NEXT: mov h2, v2.h[7] +; NOB16B16-NEXT: mov h1, 
v1.h[7] +; NOB16B16-NEXT: bfcvt h6, s6 +; NOB16B16-NEXT: shll v7.4s, v7.4h, #16 +; NOB16B16-NEXT: mov v3.h[2], v5.h[0] +; NOB16B16-NEXT: mov h5, v0.h[6] +; NOB16B16-NEXT: shll v4.4s, v4.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v16.4h, #16 +; NOB16B16-NEXT: mov h0, v0.h[7] +; NOB16B16-NEXT: shll v2.4s, v2.4h, #16 +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: fmadd s4, s16, s7, s4 +; NOB16B16-NEXT: mov v3.h[3], v6.h[0] +; NOB16B16-NEXT: bfcvt h6, s17 +; NOB16B16-NEXT: shll v7.4s, v18.4h, #16 +; NOB16B16-NEXT: shll v16.4s, v19.4h, #16 +; NOB16B16-NEXT: shll v5.4s, v5.4h, #16 +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fmadd s5, s5, s16, s7 +; NOB16B16-NEXT: mov v3.h[4], v6.h[0] +; NOB16B16-NEXT: bfcvt h4, s4 +; NOB16B16-NEXT: fmadd s0, s0, s1, s2 +; NOB16B16-NEXT: mov v3.h[5], v4.h[0] +; NOB16B16-NEXT: bfcvt h4, s5 +; NOB16B16-NEXT: bfcvt h0, s0 +; NOB16B16-NEXT: mov v3.h[6], v4.h[0] +; NOB16B16-NEXT: mov v3.h[7], v0.h[0] +; NOB16B16-NEXT: mov v0.16b, v3.16b +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmla_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q2 killed $q2 def $z2 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfmla z0.h, p0/m, z1.h, z2.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) + ret <8 x bfloat> %res +} + +; +; FMUL +; + +define <4 x bfloat> @fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fmul_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fmul v0.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v0.4s +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmul_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def 
$d1 killed $d1 def $z1 +; B16B16-NEXT: bfmul z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = fmul <4 x bfloat> %a, %b + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fmul_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v2.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v0.4h, #16 +; NOB16B16-NEXT: shll2 v1.4s, v1.8h, #16 +; NOB16B16-NEXT: shll2 v0.4s, v0.8h, #16 +; NOB16B16-NEXT: fmul v2.4s, v3.4s, v2.4s +; NOB16B16-NEXT: fmul v1.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v2.4s +; NOB16B16-NEXT: bfcvtn2 v0.8h, v1.4s +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fmul_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfmul z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = fmul <8 x bfloat> %a, %b + ret <8 x bfloat> %res +} + +; +; FNEG +; + +define <4 x bfloat> @fneg_v4bf16(<4 x bfloat> %a) { +; CHECK-LABEL: fneg_v4bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4h, #128, lsl #8 +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %res = fneg <4 x bfloat> %a + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fneg_v8bf16(<8 x bfloat> %a) { +; CHECK-LABEL: fneg_v8bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.8h, #128, lsl #8 +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %res = fneg <8 x bfloat> %a + ret <8 x bfloat> %res +} + +; +; FSQRT +; + +define <4 x bfloat> @fsqrt_v4bf16(<4 x bfloat> %a) { +; CHECK-LABEL: fsqrt_v4bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: mov h1, v0.h[1] +; CHECK-NEXT: shll v2.4s, v0.4h, #16 +; CHECK-NEXT: mov h3, v0.h[2] +; CHECK-NEXT: mov h0, v0.h[3] +; CHECK-NEXT: fsqrt s2, s2 +; CHECK-NEXT: shll v1.4s, v1.4h, #16 +; CHECK-NEXT: shll v3.4s, v3.4h, #16 
+; CHECK-NEXT: shll v0.4s, v0.4h, #16 +; CHECK-NEXT: fsqrt s1, s1 +; CHECK-NEXT: bfcvt h1, s1 +; CHECK-NEXT: fsqrt s3, s3 +; CHECK-NEXT: fsqrt s4, s0 +; CHECK-NEXT: bfcvt h0, s2 +; CHECK-NEXT: mov v0.h[1], v1.h[0] +; CHECK-NEXT: bfcvt h1, s3 +; CHECK-NEXT: mov v0.h[2], v1.h[0] +; CHECK-NEXT: bfcvt h1, s4 +; CHECK-NEXT: mov v0.h[3], v1.h[0] +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ret + %res = call <4 x bfloat> @llvm.sqrt.v4bf16(<4 x bfloat> %a) + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fsqrt_v8bf16(<8 x bfloat> %a) { +; CHECK-LABEL: fsqrt_v8bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov h1, v0.h[1] +; CHECK-NEXT: shll v2.4s, v0.4h, #16 +; CHECK-NEXT: mov h3, v0.h[2] +; CHECK-NEXT: mov h4, v0.h[3] +; CHECK-NEXT: mov h5, v0.h[4] +; CHECK-NEXT: mov h6, v0.h[5] +; CHECK-NEXT: mov h7, v0.h[6] +; CHECK-NEXT: mov h0, v0.h[7] +; CHECK-NEXT: fsqrt s2, s2 +; CHECK-NEXT: shll v1.4s, v1.4h, #16 +; CHECK-NEXT: shll v3.4s, v3.4h, #16 +; CHECK-NEXT: shll v4.4s, v4.4h, #16 +; CHECK-NEXT: shll v5.4s, v5.4h, #16 +; CHECK-NEXT: shll v6.4s, v6.4h, #16 +; CHECK-NEXT: shll v7.4s, v7.4h, #16 +; CHECK-NEXT: shll v16.4s, v0.4h, #16 +; CHECK-NEXT: bfcvt h0, s2 +; CHECK-NEXT: fsqrt s1, s1 +; CHECK-NEXT: bfcvt h1, s1 +; CHECK-NEXT: mov v0.h[1], v1.h[0] +; CHECK-NEXT: fsqrt s3, s3 +; CHECK-NEXT: bfcvt h1, s3 +; CHECK-NEXT: mov v0.h[2], v1.h[0] +; CHECK-NEXT: fsqrt s4, s4 +; CHECK-NEXT: bfcvt h1, s4 +; CHECK-NEXT: mov v0.h[3], v1.h[0] +; CHECK-NEXT: fsqrt s5, s5 +; CHECK-NEXT: bfcvt h1, s5 +; CHECK-NEXT: mov v0.h[4], v1.h[0] +; CHECK-NEXT: fsqrt s6, s6 +; CHECK-NEXT: bfcvt h1, s6 +; CHECK-NEXT: mov v0.h[5], v1.h[0] +; CHECK-NEXT: fsqrt s7, s7 +; CHECK-NEXT: bfcvt h1, s7 +; CHECK-NEXT: mov v0.h[6], v1.h[0] +; CHECK-NEXT: fsqrt s2, s16 +; CHECK-NEXT: bfcvt h1, s2 +; CHECK-NEXT: mov v0.h[7], v1.h[0] +; CHECK-NEXT: ret + %res = call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %a) + ret <8 x bfloat> %res +} + +; +; FSUB +; + +define <4 x bfloat> @fsub_v4bf16(<4 x 
bfloat> %a, <4 x bfloat> %b) { +; NOB16B16-LABEL: fsub_v4bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v1.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v0.4s, v0.4h, #16 +; NOB16B16-NEXT: fsub v0.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v0.4s +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fsub_v4bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl4 +; B16B16-NEXT: // kill: def $d0 killed $d0 def $z0 +; B16B16-NEXT: // kill: def $d1 killed $d1 def $z1 +; B16B16-NEXT: bfsub z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $d0 killed $d0 killed $z0 +; B16B16-NEXT: ret + %res = fsub <4 x bfloat> %a, %b + ret <4 x bfloat> %res +} + +define <8 x bfloat> @fsub_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) { +; NOB16B16-LABEL: fsub_v8bf16: +; NOB16B16: // %bb.0: +; NOB16B16-NEXT: shll v2.4s, v1.4h, #16 +; NOB16B16-NEXT: shll v3.4s, v0.4h, #16 +; NOB16B16-NEXT: shll2 v1.4s, v1.8h, #16 +; NOB16B16-NEXT: shll2 v0.4s, v0.8h, #16 +; NOB16B16-NEXT: fsub v2.4s, v3.4s, v2.4s +; NOB16B16-NEXT: fsub v1.4s, v0.4s, v1.4s +; NOB16B16-NEXT: bfcvtn v0.4h, v2.4s +; NOB16B16-NEXT: bfcvtn2 v0.8h, v1.4s +; NOB16B16-NEXT: ret +; +; B16B16-LABEL: fsub_v8bf16: +; B16B16: // %bb.0: +; B16B16-NEXT: ptrue p0.h, vl8 +; B16B16-NEXT: // kill: def $q0 killed $q0 def $z0 +; B16B16-NEXT: // kill: def $q1 killed $q1 def $z1 +; B16B16-NEXT: bfsub z0.h, p0/m, z0.h, z1.h +; B16B16-NEXT: // kill: def $q0 killed $q0 killed $z0 +; B16B16-NEXT: ret + %res = fsub <8 x bfloat> %a, %b + ret <8 x bfloat> %res +} diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll index 4ab5db450a7f3..282e0503dd7be 100644 --- a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll +++ b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll @@ -8,9 +8,9 @@ define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec ; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-SD-NEXT: dup v2.2s, v0.s[1] ; CHECK-SD-NEXT: mov v1.16b, 
v2.16b +; CHECK-SD-NEXT: zip1 v2.4h, v0.4h, v2.4h ; CHECK-SD-NEXT: mov v1.h[0], v0.h[1] -; CHECK-SD-NEXT: mov v0.h[1], v2.h[0] -; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: fmov d0, d2 ; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1 ; CHECK-SD-NEXT: ret ; diff --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll index 1e1e25c04b384..760742a4efad7 100644 --- a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll @@ -170,47 +170,12 @@ define <4 x half> @s_to_h(<4 x float> %a) { } define <4 x half> @d_to_h(<4 x double> %a) { -; CHECK-CVT-SD-LABEL: d_to_h: -; CHECK-CVT-SD: // %bb.0: -; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-SD-NEXT: ret -; -; CHECK-FP16-SD-LABEL: d_to_h: -; CHECK-FP16-SD: // %bb.0: -; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-FP16-SD-NEXT: ret -; -; CHECK-CVT-GI-LABEL: d_to_h: -; CHECK-CVT-GI: // %bb.0: -; CHECK-CVT-GI-NEXT: mov d2, v0.d[1] -; CHECK-CVT-GI-NEXT: fcvt h0, d0 -; CHECK-CVT-GI-NEXT: mov d3, v1.d[1] -; CHECK-CVT-GI-NEXT: fcvt h1, d1 -; CHECK-CVT-GI-NEXT: fcvt h2, d2 -; CHECK-CVT-GI-NEXT: mov v0.h[1], v2.h[0] -; CHECK-CVT-GI-NEXT: fcvt h2, d3 -; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] -; CHECK-CVT-GI-NEXT: mov v0.h[3], v2.h[0] -; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-CVT-GI-NEXT: ret -; -; CHECK-FP16-GI-LABEL: d_to_h: -; CHECK-FP16-GI: // %bb.0: -; CHECK-FP16-GI-NEXT: mov d2, v0.d[1] -; CHECK-FP16-GI-NEXT: fcvt h0, d0 -; CHECK-FP16-GI-NEXT: mov d3, v1.d[1] -; CHECK-FP16-GI-NEXT: fcvt h1, d1 -; CHECK-FP16-GI-NEXT: fcvt h2, d2 -; CHECK-FP16-GI-NEXT: mov v0.h[1], v2.h[0] -; CHECK-FP16-GI-NEXT: fcvt h2, d3 -; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0] -; CHECK-FP16-GI-NEXT: mov v0.h[3], 
v2.h[0] -; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-FP16-GI-NEXT: ret +; CHECK-LABEL: d_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = fptrunc <4 x double> %a to <4 x half> ret <4 x half> %1 } diff --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll index 7b152bcccf1e5..f94f8b449c59b 100644 --- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll @@ -176,71 +176,15 @@ define <8 x half> @s_to_h(<8 x float> %a) { } define <8 x half> @d_to_h(<8 x double> %a) { -; CHECK-CVT-SD-LABEL: d_to_h: -; CHECK-CVT-SD: // %bb.0: -; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-CVT-SD-NEXT: fcvtxn v2.2s, v2.2d -; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-CVT-SD-NEXT: fcvtxn2 v2.4s, v3.2d -; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-CVT-SD-NEXT: ret -; -; CHECK-FP16-SD-LABEL: d_to_h: -; CHECK-FP16-SD: // %bb.0: -; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-FP16-SD-NEXT: fcvtxn v2.2s, v2.2d -; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-FP16-SD-NEXT: fcvtxn2 v2.4s, v3.2d -; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-FP16-SD-NEXT: ret -; -; CHECK-CVT-GI-LABEL: d_to_h: -; CHECK-CVT-GI: // %bb.0: -; CHECK-CVT-GI-NEXT: mov d4, v0.d[1] -; CHECK-CVT-GI-NEXT: fcvt h0, d0 -; CHECK-CVT-GI-NEXT: mov d5, v1.d[1] -; CHECK-CVT-GI-NEXT: fcvt h1, d1 -; CHECK-CVT-GI-NEXT: fcvt h4, d4 -; CHECK-CVT-GI-NEXT: mov v0.h[1], v4.h[0] -; CHECK-CVT-GI-NEXT: fcvt h4, d5 -; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] -; CHECK-CVT-GI-NEXT: mov d1, v2.d[1] -; CHECK-CVT-GI-NEXT: fcvt h2, d2 -; CHECK-CVT-GI-NEXT: mov v0.h[3], v4.h[0] -; CHECK-CVT-GI-NEXT: fcvt h1, d1 -; CHECK-CVT-GI-NEXT: mov v0.h[4], v2.h[0] -; CHECK-CVT-GI-NEXT: mov d2, v3.d[1] -; 
CHECK-CVT-GI-NEXT: fcvt h3, d3 -; CHECK-CVT-GI-NEXT: mov v0.h[5], v1.h[0] -; CHECK-CVT-GI-NEXT: fcvt h1, d2 -; CHECK-CVT-GI-NEXT: mov v0.h[6], v3.h[0] -; CHECK-CVT-GI-NEXT: mov v0.h[7], v1.h[0] -; CHECK-CVT-GI-NEXT: ret -; -; CHECK-FP16-GI-LABEL: d_to_h: -; CHECK-FP16-GI: // %bb.0: -; CHECK-FP16-GI-NEXT: mov d4, v0.d[1] -; CHECK-FP16-GI-NEXT: fcvt h0, d0 -; CHECK-FP16-GI-NEXT: mov d5, v1.d[1] -; CHECK-FP16-GI-NEXT: fcvt h1, d1 -; CHECK-FP16-GI-NEXT: fcvt h4, d4 -; CHECK-FP16-GI-NEXT: mov v0.h[1], v4.h[0] -; CHECK-FP16-GI-NEXT: fcvt h4, d5 -; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0] -; CHECK-FP16-GI-NEXT: mov d1, v2.d[1] -; CHECK-FP16-GI-NEXT: fcvt h2, d2 -; CHECK-FP16-GI-NEXT: mov v0.h[3], v4.h[0] -; CHECK-FP16-GI-NEXT: fcvt h1, d1 -; CHECK-FP16-GI-NEXT: mov v0.h[4], v2.h[0] -; CHECK-FP16-GI-NEXT: mov d2, v3.d[1] -; CHECK-FP16-GI-NEXT: fcvt h3, d3 -; CHECK-FP16-GI-NEXT: mov v0.h[5], v1.h[0] -; CHECK-FP16-GI-NEXT: fcvt h1, d2 -; CHECK-FP16-GI-NEXT: mov v0.h[6], v3.h[0] -; CHECK-FP16-GI-NEXT: mov v0.h[7], v1.h[0] -; CHECK-FP16-GI-NEXT: ret +; CHECK-LABEL: d_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-NEXT: fcvtxn v2.2s, v2.2d +; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtxn2 v2.4s, v3.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-NEXT: ret %1 = fptrunc <8 x double> %a to <8 x half> ret <8 x half> %1 } diff --git a/llvm/test/CodeGen/AArch64/fptrunc.ll b/llvm/test/CodeGen/AArch64/fptrunc.ll index ae86129286ddc..56b20eaac1c80 100644 --- a/llvm/test/CodeGen/AArch64/fptrunc.ll +++ b/llvm/test/CodeGen/AArch64/fptrunc.ll @@ -112,30 +112,22 @@ define <2 x half> @fptrunc_v2f128_v2f16(<2 x fp128> %a) { ; ; CHECK-GI-LABEL: fptrunc_v2f128_v2f16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #32] // 8-byte Spill +; CHECK-GI-NEXT: 
.cfi_def_cfa_offset 48 ; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: mov v2.d[0], x8 ; CHECK-GI-NEXT: str q1, [sp] // 16-byte Spill -; CHECK-GI-NEXT: mov v2.d[1], x8 -; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Spill ; CHECK-GI-NEXT: bl __trunctfhf2 ; CHECK-GI-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Spill ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Reload ; CHECK-GI-NEXT: bl __trunctfhf2 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Reload ; CHECK-GI-NEXT: // kill: def $h0 killed $h0 def $q0 -; CHECK-GI-NEXT: str q0, [sp] // 16-byte Spill -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Reload -; CHECK-GI-NEXT: bl __trunctfhf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Reload -; CHECK-GI-NEXT: bl __trunctfhf2 -; CHECK-GI-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload -; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Reload -; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ldr x30, [sp, #32] // 8-byte Reload +; CHECK-GI-NEXT: mov v1.h[1], v0.h[0] +; CHECK-GI-NEXT: fmov d0, d1 +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: ret entry: %c = fptrunc <2 x fp128> %a to <2 x half> @@ -260,8 +252,9 @@ define <3 x float> @fptrunc_v3f64_v3f32(<3 x double> %a) { ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-GI-NEXT: fcvt s2, d2 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 ; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: fcvtn v2.2s, v2.2d ; CHECK-GI-NEXT: fcvtn v1.2s, v0.2d ; CHECK-GI-NEXT: mov v0.s[0], v1.s[0] ; CHECK-GI-NEXT: mov v0.s[1], v1.s[1] @@ -284,61 +277,49 @@ entry: } define <2 x half> @fptrunc_v2f64_v2f16(<2 x double> %a) { -; CHECK-SD-LABEL: fptrunc_v2f64_v2f16: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-SD-NEXT: ret -; 
-; CHECK-GI-LABEL: fptrunc_v2f64_v2f16: -; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: mov d1, v0.d[1] -; CHECK-GI-NEXT: fcvt h0, d0 -; CHECK-GI-NEXT: fcvt h1, d1 -; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fptrunc_v2f64_v2f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret entry: %c = fptrunc <2 x double> %a to <2 x half> ret <2 x half> %c } define <3 x half> @fptrunc_v3f64_v3f16(<3 x double> %a) { -; CHECK-LABEL: fptrunc_v3f64_v3f16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: fcvt h0, d0 -; CHECK-NEXT: fcvt h1, d1 -; CHECK-NEXT: fcvt h2, d2 -; CHECK-NEXT: mov v0.h[1], v1.h[0] -; CHECK-NEXT: mov v0.h[2], v2.h[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fptrunc_v3f64_v3f16: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: fcvt h0, d0 +; CHECK-SD-NEXT: fcvt h1, d1 +; CHECK-SD-NEXT: fcvt h2, d2 +; CHECK-SD-NEXT: mov v0.h[1], v1.h[0] +; CHECK-SD-NEXT: mov v0.h[2], v2.h[0] +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fptrunc_v3f64_v3f16: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-GI-NEXT: fcvtxn2 v0.4s, v2.2d +; CHECK-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret entry: %c = fptrunc <3 x double> %a to <3 x half> ret <3 x half> %c } define <4 x half> @fptrunc_v4f64_v4f16(<4 x double> %a) { -; CHECK-SD-LABEL: fptrunc_v4f64_v4f16: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-SD-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-SD-NEXT: fcvtn v0.4h, v0.4s -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: fptrunc_v4f64_v4f16: -; CHECK-GI: // %bb.0: // 
%entry -; CHECK-GI-NEXT: mov d2, v0.d[1] -; CHECK-GI-NEXT: fcvt h0, d0 -; CHECK-GI-NEXT: mov d3, v1.d[1] -; CHECK-GI-NEXT: fcvt h1, d1 -; CHECK-GI-NEXT: fcvt h2, d2 -; CHECK-GI-NEXT: mov v0.h[1], v2.h[0] -; CHECK-GI-NEXT: fcvt h2, d3 -; CHECK-GI-NEXT: mov v0.h[2], v1.h[0] -; CHECK-GI-NEXT: mov v0.h[3], v2.h[0] -; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fptrunc_v4f64_v4f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret entry: %c = fptrunc <4 x double> %a to <4 x half> ret <4 x half> %c diff --git a/llvm/test/CodeGen/AArch64/insert-extend.ll b/llvm/test/CodeGen/AArch64/insert-extend.ll index e128abf4d7376..1f2bacea5edef 100644 --- a/llvm/test/CodeGen/AArch64/insert-extend.ll +++ b/llvm/test/CodeGen/AArch64/insert-extend.ll @@ -66,57 +66,57 @@ define i32 @large(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr noca ; CHECK-NEXT: ldr d5, [x11, x9] ; CHECK-NEXT: shll2 v6.4s, v0.8h, #16 ; CHECK-NEXT: usubl v2.8h, v2.8b, v3.8b +; CHECK-NEXT: shll2 v7.4s, v1.8h, #16 ; CHECK-NEXT: usubl v3.8h, v4.8b, v5.8b -; CHECK-NEXT: shll2 v4.4s, v1.8h, #16 ; CHECK-NEXT: saddw v0.4s, v6.4s, v0.4h -; CHECK-NEXT: shll2 v6.4s, v2.8h, #16 +; CHECK-NEXT: shll2 v4.4s, v2.8h, #16 +; CHECK-NEXT: saddw v1.4s, v7.4s, v1.4h ; CHECK-NEXT: shll2 v5.4s, v3.8h, #16 -; CHECK-NEXT: saddw v1.4s, v4.4s, v1.4h -; CHECK-NEXT: rev64 v4.4s, v0.4s -; CHECK-NEXT: saddw v2.4s, v6.4s, v2.4h +; CHECK-NEXT: rev64 v6.4s, v0.4s +; CHECK-NEXT: saddw v2.4s, v4.4s, v2.4h +; CHECK-NEXT: rev64 v7.4s, v1.4s ; CHECK-NEXT: saddw v3.4s, v5.4s, v3.4h -; CHECK-NEXT: rev64 v5.4s, v1.4s -; CHECK-NEXT: rev64 v6.4s, v2.4s -; CHECK-NEXT: sub v4.4s, v0.4s, v4.4s +; CHECK-NEXT: rev64 v4.4s, v2.4s +; CHECK-NEXT: sub v6.4s, v0.4s, v6.4s ; CHECK-NEXT: addp v0.4s, v1.4s, v0.4s -; CHECK-NEXT: rev64 v7.4s, v3.4s -; CHECK-NEXT: sub v5.4s, v1.4s, v5.4s -; CHECK-NEXT: sub v6.4s, 
v2.4s, v6.4s +; CHECK-NEXT: rev64 v5.4s, v3.4s +; CHECK-NEXT: sub v7.4s, v1.4s, v7.4s +; CHECK-NEXT: sub v4.4s, v2.4s, v4.4s ; CHECK-NEXT: addp v2.4s, v3.4s, v2.4s -; CHECK-NEXT: zip1 v16.4s, v5.4s, v4.4s -; CHECK-NEXT: sub v7.4s, v3.4s, v7.4s -; CHECK-NEXT: trn1 v4.4s, v5.4s, v4.4s -; CHECK-NEXT: zip2 v3.4s, v6.4s, v7.4s -; CHECK-NEXT: mov v6.s[1], v7.s[0] +; CHECK-NEXT: zip1 v16.4s, v7.4s, v6.4s +; CHECK-NEXT: sub v5.4s, v3.4s, v5.4s +; CHECK-NEXT: trn1 v3.4s, v7.4s, v6.4s +; CHECK-NEXT: zip1 v6.4s, v4.4s, v5.4s +; CHECK-NEXT: zip2 v4.4s, v4.4s, v5.4s +; CHECK-NEXT: ext v5.16b, v7.16b, v16.16b, #8 ; CHECK-NEXT: ext v7.16b, v2.16b, v2.16b, #8 -; CHECK-NEXT: ext v5.16b, v5.16b, v16.16b, #8 -; CHECK-NEXT: mov v3.d[1], v4.d[1] -; CHECK-NEXT: uzp1 v1.4s, v7.4s, v0.4s -; CHECK-NEXT: uzp2 v4.4s, v7.4s, v0.4s +; CHECK-NEXT: mov v4.d[1], v3.d[1] ; CHECK-NEXT: mov v6.d[1], v5.d[1] +; CHECK-NEXT: uzp1 v1.4s, v7.4s, v0.4s +; CHECK-NEXT: uzp2 v3.4s, v7.4s, v0.4s ; CHECK-NEXT: addp v0.4s, v2.4s, v0.4s -; CHECK-NEXT: sub v1.4s, v1.4s, v4.4s +; CHECK-NEXT: add v5.4s, v4.4s, v6.4s +; CHECK-NEXT: sub v4.4s, v6.4s, v4.4s +; CHECK-NEXT: sub v1.4s, v1.4s, v3.4s ; CHECK-NEXT: rev64 v7.4s, v0.4s -; CHECK-NEXT: add v5.4s, v3.4s, v6.4s -; CHECK-NEXT: sub v3.4s, v6.4s, v3.4s +; CHECK-NEXT: rev64 v3.4s, v5.4s +; CHECK-NEXT: rev64 v6.4s, v4.4s ; CHECK-NEXT: rev64 v2.4s, v1.4s -; CHECK-NEXT: rev64 v4.4s, v5.4s -; CHECK-NEXT: rev64 v6.4s, v3.4s ; CHECK-NEXT: addp v16.4s, v0.4s, v5.4s ; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s -; CHECK-NEXT: zip1 v21.4s, v16.4s, v16.4s -; CHECK-NEXT: sub v4.4s, v5.4s, v4.4s -; CHECK-NEXT: addp v5.4s, v1.4s, v3.4s -; CHECK-NEXT: sub v3.4s, v3.4s, v6.4s +; CHECK-NEXT: sub v3.4s, v5.4s, v3.4s +; CHECK-NEXT: addp v5.4s, v1.4s, v4.4s +; CHECK-NEXT: sub v4.4s, v4.4s, v6.4s ; CHECK-NEXT: sub v1.4s, v1.4s, v2.4s ; CHECK-NEXT: ext v7.16b, v0.16b, v16.16b, #4 -; CHECK-NEXT: ext v2.16b, v16.16b, v4.16b, #4 -; CHECK-NEXT: ext v6.16b, v5.16b, v3.16b, #4 -; CHECK-NEXT: mov 
v19.16b, v4.16b +; CHECK-NEXT: zip1 v21.4s, v16.4s, v16.4s +; CHECK-NEXT: ext v2.16b, v16.16b, v3.16b, #4 +; CHECK-NEXT: ext v6.16b, v5.16b, v4.16b, #4 +; CHECK-NEXT: mov v19.16b, v3.16b ; CHECK-NEXT: ext v17.16b, v1.16b, v5.16b, #8 -; CHECK-NEXT: mov v20.16b, v3.16b -; CHECK-NEXT: trn2 v0.4s, v21.4s, v0.4s +; CHECK-NEXT: mov v20.16b, v4.16b ; CHECK-NEXT: ext v7.16b, v7.16b, v7.16b, #4 +; CHECK-NEXT: trn2 v0.4s, v21.4s, v0.4s ; CHECK-NEXT: mov v19.s[2], v16.s[3] ; CHECK-NEXT: zip2 v2.4s, v2.4s, v16.4s ; CHECK-NEXT: zip2 v6.4s, v6.4s, v5.4s @@ -125,8 +125,8 @@ define i32 @large(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr noca ; CHECK-NEXT: mov v1.s[2], v5.s[1] ; CHECK-NEXT: mov v21.16b, v7.16b ; CHECK-NEXT: sub v7.4s, v0.4s, v7.4s -; CHECK-NEXT: ext v2.16b, v4.16b, v2.16b, #12 -; CHECK-NEXT: ext v3.16b, v3.16b, v6.16b, #12 +; CHECK-NEXT: ext v2.16b, v3.16b, v2.16b, #12 +; CHECK-NEXT: ext v3.16b, v4.16b, v6.16b, #12 ; CHECK-NEXT: uzp2 v4.4s, v17.4s, v18.4s ; CHECK-NEXT: mov v6.16b, v1.16b ; CHECK-NEXT: mov v17.16b, v19.16b diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll index 6828fa9f1508c..88b6ea4f0cb19 100644 --- a/llvm/test/CodeGen/AArch64/insert-subvector.ll +++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll @@ -102,10 +102,7 @@ define <8 x i8> @insert_v8i8_4_1(float %tmp, <8 x i8> %b, <8 x i8> %a) { define <8 x i8> @insert_v8i8_4_2(float %tmp, <8 x i8> %b, <8 x i8> %a) { ; CHECK-LABEL: insert_v8i8_4_2: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov d0, d1 -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: zip1 v0.2s, v1.2s, v2.2s ; CHECK-NEXT: ret %s2 = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> ret <8 x i8> %s2 @@ -124,8 +121,7 @@ define <16 x i8> @insert_v16i8_8_1(float %tmp, <16 x i8> %b, <16 x i8> %a) { define <16 x i8> @insert_v16i8_8_2(float %tmp, <16 x i8> %b, <16 x i8> %a) { ; 
CHECK-LABEL: insert_v16i8_8_2: ; CHECK: // %bb.0: -; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: mov v0.d[1], v2.d[0] +; CHECK-NEXT: zip1 v0.2d, v1.2d, v2.2d ; CHECK-NEXT: ret %s2 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> ret <16 x i8> %s2 @@ -201,10 +197,7 @@ define <4 x i16> @insert_v4i16_2_1(float %tmp, <4 x i16> %b, <4 x i16> %a) { define <4 x i16> @insert_v4i16_2_2(float %tmp, <4 x i16> %b, <4 x i16> %a) { ; CHECK-LABEL: insert_v4i16_2_2: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov d0, d1 -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: zip1 v0.2s, v1.2s, v2.2s ; CHECK-NEXT: ret %s2 = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> ret <4 x i16> %s2 @@ -223,8 +216,7 @@ define <8 x i16> @insert_v8i16_4_1(float %tmp, <8 x i16> %b, <8 x i16> %a) { define <8 x i16> @insert_v8i16_4_2(float %tmp, <8 x i16> %b, <8 x i16> %a) { ; CHECK-LABEL: insert_v8i16_4_2: ; CHECK: // %bb.0: -; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: mov v0.d[1], v2.d[0] +; CHECK-NEXT: zip1 v0.2d, v1.2d, v2.2d ; CHECK-NEXT: ret %s2 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> ret <8 x i16> %s2 @@ -245,8 +237,7 @@ define <4 x i32> @insert_v4i32_2_1(float %tmp, <4 x i32> %b, <4 x i32> %a) { define <4 x i32> @insert_v4i32_2_2(float %tmp, <4 x i32> %b, <4 x i32> %a) { ; CHECK-LABEL: insert_v4i32_2_2: ; CHECK: // %bb.0: -; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: mov v0.d[1], v2.d[0] +; CHECK-NEXT: zip1 v0.2d, v1.2d, v2.2d ; CHECK-NEXT: ret %s2 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> ret <4 x i32> %s2 @@ -337,10 +328,8 @@ define <8 x i8> @load_v8i8_4_1(float %tmp, <8 x i8> %b, ptr %a) { define <8 x i8> @load_v8i8_4_2(float %tmp, <8 x i8> %b, ptr %a) { ; CHECK-LABEL: load_v8i8_4_2: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov d0, d1 -; CHECK-NEXT: ldr s2, [x0] -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; 
CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: zip1 v0.2s, v1.2s, v0.2s ; CHECK-NEXT: ret %l = load <4 x i8>, ptr %a %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <8 x i32> @@ -465,10 +454,8 @@ define <4 x i8> @load_v4i8_2_2(float %tmp, <4 x i8> %b, ptr %a) { ; CHECK-LABEL: load_v4i8_2_2: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr h0, [x0] -; CHECK-NEXT: zip1 v2.8b, v0.8b, v0.8b -; CHECK-NEXT: fmov d0, d1 -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b +; CHECK-NEXT: zip1 v0.2s, v1.2s, v0.2s ; CHECK-NEXT: ret %l = load <2 x i8>, ptr %a %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> @@ -558,10 +545,8 @@ define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) { define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) { ; CHECK-LABEL: load_v4i16_2_2: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov d0, d1 -; CHECK-NEXT: ldr s2, [x0] -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: zip1 v0.2s, v1.2s, v0.2s ; CHECK-NEXT: ret %l = load <2 x i16>, ptr %a %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll index 64cb3603f53a1..5753798e87512 100644 --- a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll +++ b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll @@ -771,3 +771,31 @@ entry: %m = mul <1 x i64> %s0, %t1 ret <1 x i64> %m } + +define <2 x i8> @extract_scalable_vec() vscale_range(1,16) "target-features"="+sve" { +; CHECK-SD-LABEL: extract_scalable_vec: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mov x8, xzr +; CHECK-SD-NEXT: index z1.s, #2, #3 +; CHECK-SD-NEXT: ldr h0, [x8] +; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-SD-NEXT: mul v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: extract_scalable_vec: +; 
CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov x8, xzr +; CHECK-GI-NEXT: mov x9, #1 // =0x1 +; CHECK-GI-NEXT: ld1 { v0.b }[0], [x8] +; CHECK-GI-NEXT: ldr b1, [x9] +; CHECK-GI-NEXT: adrp x8, .LCPI36_0 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI36_0] +; CHECK-GI-NEXT: mul v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: ret +entry: + %0 = load <2 x i8>, ptr null, align 2 + %mul = mul <2 x i8> %0, + ret <2 x i8> %mul +} diff --git a/llvm/test/CodeGen/AArch64/neon-widen-shuffle.ll b/llvm/test/CodeGen/AArch64/neon-widen-shuffle.ll index afcced5dcb9ab..b05e5773cdbd1 100644 --- a/llvm/test/CodeGen/AArch64/neon-widen-shuffle.ll +++ b/llvm/test/CodeGen/AArch64/neon-widen-shuffle.ll @@ -24,7 +24,7 @@ entry: define <4 x i32> @shuffle3(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle3: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov v0.d[0], v1.d[1] +; CHECK-NEXT: zip2 v0.2d, v1.2d, v0.2d ; CHECK-NEXT: ret entry: %res = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> @@ -113,8 +113,7 @@ define <8 x i16> @shuffle10(<8 x i16> %a) { define <4 x i16> @shuffle11(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: shuffle11: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov v1.s[1], v0.s[0] -; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: zip1 v0.2s, v1.2s, v0.2s ; CHECK-NEXT: ret entry: %res = shufflevector <8 x i16> %a, <8 x i16> %b, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll b/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll new file mode 100644 index 0000000000000..4ee1c19a86490 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll @@ -0,0 +1,95 @@ +; RUN: llc -mtriple aarch64-linux-gnu -mattr=+pauth -filetype=asm -o - %s | FileCheck %s + +; CHECK: nullref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: movz x0, #0 +; CHECK-NEXT: mov x1, #1 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@nullref = 
constant ptr ptrauth (ptr null, i32 2, i64 1, ptr null), align 8 + +@dsolocal = external dso_local global i8 + +; CHECK: dsolocalref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: mov x1, #2 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalref = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 2, ptr null), align 8 + +@ds = external global i8 + +; CHECK: dsolocalrefds: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: mov x1, #2 +; CHECK-NEXT: [[LABEL:.L.*]]: +; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: ret +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalrefds = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 2, ptr null, ptr @ds), align 8 + +; CHECK: dsolocalref8: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal+8 +; CHECK-NEXT: add x0, x0, :lo12:dsolocal+8 +; CHECK-NEXT: mov x1, #3 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalref8 = constant ptr ptrauth (ptr getelementptr (i8, ptr @dsolocal, i64 8), i32 2, i64 3, ptr null), align 8 + +; CHECK: disc: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: adrp x1, [[PLACE]] +; CHECK-NEXT: add x1, x1, :lo12:[[PLACE]] +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@disc = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 0, ptr @disc), align 8 + +@global = external 
global i8 + +; CHECK: globalref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, :got:global +; CHECK-NEXT: ldr x0, [x0, :got_lo12:global] +; CHECK-NEXT: mov x1, #4 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@globalref = constant ptr ptrauth (ptr @global, i32 2, i64 4, ptr null), align 8 + +; CHECK: globalref8: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, :got:global +; CHECK-NEXT: ldr x0, [x0, :got_lo12:global] +; CHECK-NEXT: add x0, x0, #8 +; CHECK-NEXT: mov x1, #5 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@globalref8 = constant ptr ptrauth (ptr getelementptr (i8, ptr @global, i64 8), i32 2, i64 5, ptr null), align 8 diff --git a/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll index fbd777911aecb..31ef6cba6fbdd 100644 --- a/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll +++ b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple aarch64-linux-gnu -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=ELF %s +; RUN: llc -mtriple aarch64-linux-musl -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=ELF %s ; RUN: llc -mtriple aarch64-apple-darwin -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=MACHO %s ; ELF-LABEL: _ZTI10Disc: diff --git a/llvm/test/CodeGen/AArch64/reduce-shuffle.ll b/llvm/test/CodeGen/AArch64/reduce-shuffle.ll index 354edc4ff7ab4..072f6f4e8f73e 100644 --- a/llvm/test/CodeGen/AArch64/reduce-shuffle.ll +++ b/llvm/test/CodeGen/AArch64/reduce-shuffle.ll @@ -34,27 +34,26 @@ define i32 @v1(ptr nocapture noundef readonly %p1, i32 noundef %i1, ptr nocaptur ; CHECK-NEXT: saddw v3.4s, v7.4s, v3.4h ; CHECK-NEXT: uzp2 v4.4s, v1.4s, v2.4s ; 
CHECK-NEXT: zip1 v5.4s, v3.4s, v0.4s -; CHECK-NEXT: mov v6.16b, v2.16b -; CHECK-NEXT: trn1 v7.4s, v3.4s, v0.4s +; CHECK-NEXT: trn1 v6.4s, v3.4s, v0.4s ; CHECK-NEXT: zip2 v0.4s, v3.4s, v0.4s -; CHECK-NEXT: ext v17.16b, v1.16b, v1.16b, #12 -; CHECK-NEXT: zip2 v18.4s, v1.4s, v2.4s -; CHECK-NEXT: zip2 v16.4s, v2.4s, v1.4s -; CHECK-NEXT: mov v6.s[1], v1.s[0] +; CHECK-NEXT: ext v16.16b, v1.16b, v1.16b, #12 +; CHECK-NEXT: zip2 v17.4s, v1.4s, v2.4s +; CHECK-NEXT: zip2 v7.4s, v2.4s, v1.4s +; CHECK-NEXT: zip1 v18.4s, v2.4s, v1.4s ; CHECK-NEXT: uzp2 v4.4s, v4.4s, v1.4s ; CHECK-NEXT: ext v3.16b, v3.16b, v5.16b, #8 ; CHECK-NEXT: mov v1.s[0], v2.s[1] -; CHECK-NEXT: ext v2.16b, v2.16b, v17.16b, #12 -; CHECK-NEXT: mov v18.d[1], v7.d[1] -; CHECK-NEXT: mov v16.d[1], v7.d[1] +; CHECK-NEXT: ext v2.16b, v2.16b, v16.16b, #12 +; CHECK-NEXT: mov v17.d[1], v6.d[1] +; CHECK-NEXT: mov v7.d[1], v6.d[1] ; CHECK-NEXT: mov v4.d[1], v0.d[1] -; CHECK-NEXT: mov v6.d[1], v3.d[1] +; CHECK-NEXT: mov v18.d[1], v3.d[1] ; CHECK-NEXT: mov v1.d[1], v5.d[1] ; CHECK-NEXT: mov v2.d[1], v0.d[1] -; CHECK-NEXT: add v0.4s, v4.4s, v18.4s -; CHECK-NEXT: add v3.4s, v1.4s, v6.4s -; CHECK-NEXT: sub v1.4s, v6.4s, v1.4s -; CHECK-NEXT: sub v2.4s, v16.4s, v2.4s +; CHECK-NEXT: add v0.4s, v4.4s, v17.4s +; CHECK-NEXT: add v3.4s, v1.4s, v18.4s +; CHECK-NEXT: sub v1.4s, v18.4s, v1.4s +; CHECK-NEXT: sub v2.4s, v7.4s, v2.4s ; CHECK-NEXT: rev64 v4.4s, v0.4s ; CHECK-NEXT: rev64 v5.4s, v3.4s ; CHECK-NEXT: sub v6.4s, v1.4s, v2.4s @@ -239,99 +238,98 @@ define i32 @v2(ptr nocapture noundef readonly %p1, i32 noundef %i1, ptr nocaptur ; CHECK-NEXT: add x10, x10, x8 ; CHECK-NEXT: ldr d3, [x11] ; CHECK-NEXT: add x11, x11, x9 -; CHECK-NEXT: ldr d4, [x10, x8] -; CHECK-NEXT: ldr d6, [x10] -; CHECK-NEXT: ldr d5, [x11, x9] -; CHECK-NEXT: ldr d7, [x11] +; CHECK-NEXT: ldr d4, [x10] +; CHECK-NEXT: ldr d6, [x10, x8] +; CHECK-NEXT: ldr d5, [x11] +; CHECK-NEXT: ldr d7, [x11, x9] ; CHECK-NEXT: usubl v0.8h, v0.8b, v1.8b ; CHECK-NEXT: usubl v1.8h, 
v2.8b, v3.8b ; CHECK-NEXT: usubl v2.8h, v4.8b, v5.8b ; CHECK-NEXT: usubl v3.8h, v6.8b, v7.8b ; CHECK-NEXT: shll2 v4.4s, v0.8h, #16 ; CHECK-NEXT: shll2 v5.4s, v1.8h, #16 -; CHECK-NEXT: shll2 v6.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v7.4s, v3.8h, #16 +; CHECK-NEXT: shll2 v6.4s, v3.8h, #16 +; CHECK-NEXT: shll2 v7.4s, v2.8h, #16 ; CHECK-NEXT: saddw v0.4s, v4.4s, v0.4h ; CHECK-NEXT: saddw v1.4s, v5.4s, v1.4h -; CHECK-NEXT: saddw v2.4s, v6.4s, v2.4h -; CHECK-NEXT: saddw v3.4s, v7.4s, v3.4h +; CHECK-NEXT: saddw v3.4s, v6.4s, v3.4h +; CHECK-NEXT: saddw v2.4s, v7.4s, v2.4h ; CHECK-NEXT: zip1 v4.4s, v1.4s, v0.4s ; CHECK-NEXT: trn1 v18.4s, v1.4s, v0.4s ; CHECK-NEXT: zip2 v0.4s, v1.4s, v0.4s -; CHECK-NEXT: uzp2 v5.4s, v2.4s, v3.4s -; CHECK-NEXT: mov v6.16b, v2.16b -; CHECK-NEXT: mov v16.16b, v3.16b -; CHECK-NEXT: zip2 v7.4s, v2.4s, v3.4s -; CHECK-NEXT: mov v6.s[0], v3.s[1] +; CHECK-NEXT: uzp2 v5.4s, v3.4s, v2.4s +; CHECK-NEXT: mov v7.16b, v3.16b +; CHECK-NEXT: zip1 v6.4s, v2.4s, v3.4s +; CHECK-NEXT: zip2 v16.4s, v3.4s, v2.4s ; CHECK-NEXT: ext v17.16b, v1.16b, v4.16b, #8 -; CHECK-NEXT: mov v16.s[1], v2.s[0] -; CHECK-NEXT: uzp2 v1.4s, v5.4s, v2.4s -; CHECK-NEXT: ext v5.16b, v2.16b, v2.16b, #12 -; CHECK-NEXT: zip2 v2.4s, v3.4s, v2.4s -; CHECK-NEXT: mov v7.d[1], v18.d[1] -; CHECK-NEXT: mov v6.d[1], v4.d[1] -; CHECK-NEXT: mov v16.d[1], v17.d[1] +; CHECK-NEXT: mov v7.s[0], v2.s[1] +; CHECK-NEXT: ext v1.16b, v3.16b, v3.16b, #12 +; CHECK-NEXT: uzp2 v5.4s, v5.4s, v3.4s +; CHECK-NEXT: zip2 v3.4s, v2.4s, v3.4s +; CHECK-NEXT: mov v16.d[1], v18.d[1] +; CHECK-NEXT: mov v6.d[1], v17.d[1] +; CHECK-NEXT: mov v7.d[1], v4.d[1] +; CHECK-NEXT: ext v1.16b, v2.16b, v1.16b, #12 +; CHECK-NEXT: mov v5.d[1], v0.d[1] +; CHECK-NEXT: mov v3.d[1], v18.d[1] +; CHECK-NEXT: add v2.4s, v7.4s, v6.4s ; CHECK-NEXT: mov v1.d[1], v0.d[1] -; CHECK-NEXT: ext v3.16b, v3.16b, v5.16b, #12 -; CHECK-NEXT: mov v2.d[1], v18.d[1] -; CHECK-NEXT: add v4.4s, v6.4s, v16.4s -; CHECK-NEXT: add v1.4s, v1.4s, v7.4s -; CHECK-NEXT: mov 
v3.d[1], v0.d[1] -; CHECK-NEXT: rev64 v5.4s, v4.4s -; CHECK-NEXT: rev64 v0.4s, v1.4s -; CHECK-NEXT: sub v2.4s, v2.4s, v3.4s -; CHECK-NEXT: sub v3.4s, v16.4s, v6.4s -; CHECK-NEXT: mov v5.d[1], v4.d[1] -; CHECK-NEXT: mov v0.d[1], v1.d[1] -; CHECK-NEXT: add v6.4s, v2.4s, v3.4s -; CHECK-NEXT: sub v2.4s, v3.4s, v2.4s -; CHECK-NEXT: add v1.4s, v1.4s, v5.4s -; CHECK-NEXT: sub v0.4s, v4.4s, v0.4s -; CHECK-NEXT: zip1 v3.4s, v1.4s, v6.4s -; CHECK-NEXT: uzp2 v4.4s, v1.4s, v6.4s -; CHECK-NEXT: zip2 v16.4s, v1.4s, v6.4s -; CHECK-NEXT: zip1 v5.4s, v0.4s, v2.4s -; CHECK-NEXT: trn1 v7.4s, v0.4s, v2.4s -; CHECK-NEXT: zip2 v2.4s, v0.4s, v2.4s -; CHECK-NEXT: trn2 v3.4s, v1.4s, v3.4s -; CHECK-NEXT: uzp2 v4.4s, v4.4s, v1.4s -; CHECK-NEXT: mov v1.s[1], v6.s[1] +; CHECK-NEXT: add v4.4s, v5.4s, v16.4s +; CHECK-NEXT: rev64 v5.4s, v2.4s +; CHECK-NEXT: rev64 v0.4s, v4.4s +; CHECK-NEXT: sub v1.4s, v3.4s, v1.4s +; CHECK-NEXT: sub v3.4s, v6.4s, v7.4s +; CHECK-NEXT: mov v5.d[1], v2.d[1] +; CHECK-NEXT: add v6.4s, v1.4s, v3.4s +; CHECK-NEXT: sub v1.4s, v3.4s, v1.4s +; CHECK-NEXT: mov v0.d[1], v4.d[1] +; CHECK-NEXT: add v4.4s, v4.4s, v5.4s +; CHECK-NEXT: sub v0.4s, v2.4s, v0.4s +; CHECK-NEXT: zip1 v2.4s, v4.4s, v6.4s +; CHECK-NEXT: uzp2 v3.4s, v4.4s, v6.4s +; CHECK-NEXT: zip2 v16.4s, v4.4s, v6.4s +; CHECK-NEXT: zip1 v5.4s, v0.4s, v1.4s +; CHECK-NEXT: trn1 v7.4s, v0.4s, v1.4s +; CHECK-NEXT: zip2 v1.4s, v0.4s, v1.4s +; CHECK-NEXT: trn2 v2.4s, v4.4s, v2.4s +; CHECK-NEXT: uzp2 v3.4s, v3.4s, v4.4s +; CHECK-NEXT: mov v4.s[1], v6.s[1] ; CHECK-NEXT: ext v0.16b, v0.16b, v5.16b, #8 ; CHECK-NEXT: mov v16.d[1], v7.d[1] -; CHECK-NEXT: mov v4.d[1], v2.d[1] -; CHECK-NEXT: mov v1.d[1], v5.d[1] -; CHECK-NEXT: mov v3.d[1], v0.d[1] -; CHECK-NEXT: add v0.4s, v16.4s, v4.4s -; CHECK-NEXT: sub v4.4s, v4.4s, v16.4s -; CHECK-NEXT: add v2.4s, v1.4s, v3.4s -; CHECK-NEXT: sub v1.4s, v3.4s, v1.4s -; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #4 -; CHECK-NEXT: zip2 v6.4s, v0.4s, v4.4s -; CHECK-NEXT: zip2 v7.4s, v4.4s, v0.4s -; 
CHECK-NEXT: ext v5.16b, v2.16b, v2.16b, #4 -; CHECK-NEXT: zip2 v16.4s, v1.4s, v2.4s -; CHECK-NEXT: zip2 v17.4s, v2.4s, v1.4s -; CHECK-NEXT: zip1 v0.4s, v0.4s, v4.4s -; CHECK-NEXT: ext v18.16b, v3.16b, v4.16b, #8 -; CHECK-NEXT: ext v19.16b, v5.16b, v1.16b, #8 -; CHECK-NEXT: zip1 v1.4s, v2.4s, v1.4s +; CHECK-NEXT: mov v3.d[1], v1.d[1] +; CHECK-NEXT: mov v4.d[1], v5.d[1] +; CHECK-NEXT: mov v2.d[1], v0.d[1] +; CHECK-NEXT: add v0.4s, v16.4s, v3.4s +; CHECK-NEXT: sub v3.4s, v3.4s, v16.4s +; CHECK-NEXT: add v1.4s, v4.4s, v2.4s +; CHECK-NEXT: sub v2.4s, v2.4s, v4.4s +; CHECK-NEXT: ext v4.16b, v0.16b, v0.16b, #4 +; CHECK-NEXT: zip2 v6.4s, v0.4s, v3.4s +; CHECK-NEXT: zip2 v7.4s, v3.4s, v0.4s +; CHECK-NEXT: ext v5.16b, v1.16b, v1.16b, #4 +; CHECK-NEXT: zip2 v16.4s, v2.4s, v1.4s +; CHECK-NEXT: zip2 v17.4s, v1.4s, v2.4s +; CHECK-NEXT: zip1 v0.4s, v0.4s, v3.4s +; CHECK-NEXT: zip1 v1.4s, v1.4s, v2.4s +; CHECK-NEXT: ext v18.16b, v4.16b, v3.16b, #8 +; CHECK-NEXT: ext v19.16b, v5.16b, v2.16b, #8 ; CHECK-NEXT: add v2.4s, v16.4s, v7.4s -; CHECK-NEXT: sub v4.4s, v6.4s, v17.4s -; CHECK-NEXT: ext v3.16b, v18.16b, v3.16b, #4 +; CHECK-NEXT: sub v3.4s, v6.4s, v17.4s +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ext v4.16b, v18.16b, v4.16b, #4 +; CHECK-NEXT: cmlt v1.8h, v3.8h, #0 ; CHECK-NEXT: cmlt v6.8h, v2.8h, #0 ; CHECK-NEXT: ext v5.16b, v19.16b, v5.16b, #4 -; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: cmlt v1.8h, v4.8h, #0 ; CHECK-NEXT: add v2.4s, v6.4s, v2.4s -; CHECK-NEXT: add v4.4s, v1.4s, v4.4s -; CHECK-NEXT: add v3.4s, v5.4s, v3.4s +; CHECK-NEXT: add v3.4s, v1.4s, v3.4s +; CHECK-NEXT: add v4.4s, v5.4s, v4.4s ; CHECK-NEXT: cmlt v5.8h, v0.8h, #0 +; CHECK-NEXT: eor v1.16b, v3.16b, v1.16b ; CHECK-NEXT: eor v2.16b, v2.16b, v6.16b -; CHECK-NEXT: eor v1.16b, v4.16b, v1.16b -; CHECK-NEXT: cmlt v7.8h, v3.8h, #0 +; CHECK-NEXT: cmlt v7.8h, v4.8h, #0 ; CHECK-NEXT: add v0.4s, v5.4s, v0.4s ; CHECK-NEXT: add v1.4s, v2.4s, v1.4s -; CHECK-NEXT: add v3.4s, v7.4s, v3.4s +; CHECK-NEXT: 
add v3.4s, v7.4s, v4.4s ; CHECK-NEXT: eor v0.16b, v0.16b, v5.16b ; CHECK-NEXT: eor v2.16b, v3.16b, v7.16b ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s diff --git a/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll index bcf5063bdda04..eafa44a35d024 100644 --- a/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll +++ b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll @@ -22,26 +22,13 @@ define void @fmul_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 { ; CHECK-LABEL: fmul_indexed_bf16_256b: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q2, q3, [x1] ; CHECK-NEXT: dup v0.8h, v0.h[2] ; CHECK-NEXT: dup v1.8h, v1.h[2] -; CHECK-NEXT: shll v4.4s, v2.4h, #16 -; CHECK-NEXT: shll v6.4s, v3.4h, #16 -; CHECK-NEXT: shll2 v2.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v3.4s, v3.8h, #16 -; CHECK-NEXT: shll v5.4s, v0.4h, #16 -; CHECK-NEXT: shll v7.4s, v1.4h, #16 -; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 -; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 -; CHECK-NEXT: fmul v4.4s, v4.4s, v5.4s -; CHECK-NEXT: fmul v5.4s, v6.4s, v7.4s -; CHECK-NEXT: fmul v0.4s, v2.4s, v0.4s -; CHECK-NEXT: fmul v1.4s, v3.4s, v1.4s -; CHECK-NEXT: bfcvtn v2.4h, v4.4s -; CHECK-NEXT: bfcvtn v3.4h, v5.4s -; CHECK-NEXT: bfcvtn2 v2.8h, v0.4s -; CHECK-NEXT: bfcvtn2 v3.8h, v1.4s -; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: bfmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret %ld.a = load <16 x bfloat>, ptr %a %ld.b = load <16 x bfloat>, ptr %b @@ -124,43 +111,16 @@ define void @fmla_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 { ; CHECK-LABEL: fmla_indexed_bf16_256b: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: ldp q4, q5, [x2] ; CHECK-NEXT: dup v0.8h, v0.h[2] ; CHECK-NEXT: dup v1.8h, v1.h[2] -; CHECK-NEXT: shll v4.4s, v2.4h, #16 -; CHECK-NEXT: shll v6.4s, v3.4h, #16 -; CHECK-NEXT: 
shll2 v2.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v3.4s, v3.8h, #16 -; CHECK-NEXT: shll v5.4s, v0.4h, #16 -; CHECK-NEXT: shll v7.4s, v1.4h, #16 -; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 -; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 -; CHECK-NEXT: fmul v4.4s, v4.4s, v5.4s -; CHECK-NEXT: fmul v5.4s, v6.4s, v7.4s -; CHECK-NEXT: fmul v0.4s, v2.4s, v0.4s -; CHECK-NEXT: fmul v1.4s, v3.4s, v1.4s -; CHECK-NEXT: bfcvtn v2.4h, v4.4s -; CHECK-NEXT: bfcvtn v3.4h, v5.4s -; CHECK-NEXT: bfcvtn2 v2.8h, v0.4s -; CHECK-NEXT: bfcvtn2 v3.8h, v1.4s -; CHECK-NEXT: ldp q0, q1, [x2] -; CHECK-NEXT: shll v4.4s, v0.4h, #16 -; CHECK-NEXT: shll v5.4s, v2.4h, #16 -; CHECK-NEXT: shll v6.4s, v1.4h, #16 -; CHECK-NEXT: shll v7.4s, v3.4h, #16 -; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 -; CHECK-NEXT: shll2 v2.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 -; CHECK-NEXT: shll2 v3.4s, v3.8h, #16 -; CHECK-NEXT: fadd v4.4s, v5.4s, v4.4s -; CHECK-NEXT: fadd v5.4s, v7.4s, v6.4s -; CHECK-NEXT: fadd v0.4s, v2.4s, v0.4s -; CHECK-NEXT: fadd v1.4s, v3.4s, v1.4s -; CHECK-NEXT: bfcvtn v2.4h, v4.4s -; CHECK-NEXT: bfcvtn v3.4h, v5.4s -; CHECK-NEXT: bfcvtn2 v2.8h, v0.4s -; CHECK-NEXT: bfcvtn2 v3.8h, v1.4s -; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: bfmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: bfadd z0.h, p0/m, z0.h, z4.h +; CHECK-NEXT: bfadd z1.h, p0/m, z1.h, z5.h +; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret %ld.a = load <16 x bfloat>, ptr %a %ld.b = load <16 x bfloat>, ptr %b @@ -251,43 +211,16 @@ define void @fmls_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 { ; CHECK-LABEL: fmls_indexed_bf16_256b: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: ldp q4, q5, [x2] ; CHECK-NEXT: dup v0.8h, v0.h[2] ; CHECK-NEXT: dup v1.8h, v1.h[2] -; CHECK-NEXT: shll v4.4s, v2.4h, #16 -; CHECK-NEXT: shll v6.4s, v3.4h, #16 -; CHECK-NEXT: shll2 v2.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v3.4s, v3.8h, #16 -; CHECK-NEXT: shll 
v5.4s, v0.4h, #16 -; CHECK-NEXT: shll v7.4s, v1.4h, #16 -; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 -; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 -; CHECK-NEXT: fmul v4.4s, v4.4s, v5.4s -; CHECK-NEXT: fmul v5.4s, v6.4s, v7.4s -; CHECK-NEXT: fmul v0.4s, v2.4s, v0.4s -; CHECK-NEXT: fmul v1.4s, v3.4s, v1.4s -; CHECK-NEXT: bfcvtn v2.4h, v4.4s -; CHECK-NEXT: bfcvtn v3.4h, v5.4s -; CHECK-NEXT: bfcvtn2 v2.8h, v0.4s -; CHECK-NEXT: bfcvtn2 v3.8h, v1.4s -; CHECK-NEXT: ldp q0, q1, [x2] -; CHECK-NEXT: shll v4.4s, v0.4h, #16 -; CHECK-NEXT: shll v5.4s, v2.4h, #16 -; CHECK-NEXT: shll v6.4s, v1.4h, #16 -; CHECK-NEXT: shll v7.4s, v3.4h, #16 -; CHECK-NEXT: shll2 v0.4s, v0.8h, #16 -; CHECK-NEXT: shll2 v2.4s, v2.8h, #16 -; CHECK-NEXT: shll2 v1.4s, v1.8h, #16 -; CHECK-NEXT: shll2 v3.4s, v3.8h, #16 -; CHECK-NEXT: fsub v4.4s, v4.4s, v5.4s -; CHECK-NEXT: fsub v5.4s, v6.4s, v7.4s -; CHECK-NEXT: fsub v0.4s, v0.4s, v2.4s -; CHECK-NEXT: fsub v1.4s, v1.4s, v3.4s -; CHECK-NEXT: bfcvtn v2.4h, v4.4s -; CHECK-NEXT: bfcvtn v3.4h, v5.4s -; CHECK-NEXT: bfcvtn2 v2.8h, v0.4s -; CHECK-NEXT: bfcvtn2 v3.8h, v1.4s -; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: bfmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: bfsub z0.h, p0/m, z0.h, z4.h +; CHECK-NEXT: bfsub z1.h, p0/m, z1.h, z5.h +; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret %ld.a = load <16 x bfloat>, ptr %a %ld.b = load <16 x bfloat>, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-masked-compressstore-sve2p2.ll b/llvm/test/CodeGen/AArch64/sve-masked-compressstore-sve2p2.ll new file mode 100644 index 0000000000000..92ecc3c83e2c5 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-masked-compressstore-sve2p2.ll @@ -0,0 +1,17 @@ +; RUN: llc -mtriple=aarch64 -mattr=+sve2p2 < %s + +;; These masked.compressstore operations could be natively supported with +sve2p2 +;; (or by promoting to 32/64 bit elements + a truncstore), but currently are not +;; supported. 
+ +; XFAIL: * + +define void @test_compressstore_nxv8i16(ptr %p, %vec, %mask) { + tail call void @llvm.masked.compressstore.nxv8i16( %vec, ptr align 2 %p, %mask) + ret void +} + +define void @test_compressstore_nxv16i8(ptr %p, %vec, %mask) { + tail call void @llvm.masked.compressstore.nxv16i8( %vec, ptr align 1 %p, %mask) + ret void +} diff --git a/llvm/test/CodeGen/AArch64/sve-masked-compressstore.ll b/llvm/test/CodeGen/AArch64/sve-masked-compressstore.ll new file mode 100644 index 0000000000000..c698658afc8c4 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-masked-compressstore.ll @@ -0,0 +1,280 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK,CHECK-BASE +; RUN: llc -mtriple=aarch64 -aarch64-sve-vector-bits-min=256 -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK,CHECK-VL256 + +;; Full SVE vectors (supported with +sve) + +define void @test_compressstore_nxv4i32(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.s +; CHECK-NEXT: compact z0.s, p0, z0.s +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv4i32( %vec, ptr align 4 %p, %mask) + ret void +} + +define void @test_compressstore_nxv2i64(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv2i64( %vec, ptr align 8 %p, %mask) + ret void +} + +define void @test_compressstore_nxv4f32(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.s +; CHECK-NEXT: compact z0.s, p0, z0.s +; CHECK-NEXT: whilelo p0.s, xzr, 
x8 +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv4f32( %vec, ptr align 4 %p, %mask) + ret void +} + +define void @test_compressstore_nxv2f64(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv2f64( %vec, ptr align 8 %p, %mask) + ret void +} + +;; SVE vectors that will be split + +define void @test_compressstore_nxv8i32(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: cntp x8, p1, p1.s +; CHECK-NEXT: compact z1.s, p1, z1.s +; CHECK-NEXT: cntp x9, p0, p0.s +; CHECK-NEXT: compact z0.s, p0, z0.s +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: whilelo p1.s, xzr, x9 +; CHECK-NEXT: st1w { z1.s }, p0, [x0, x9, lsl #2] +; CHECK-NEXT: st1w { z0.s }, p1, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv8i32( %vec, ptr align 4 %p, %mask) + ret void +} + +;; Unpacked SVE vector types + +define void @test_compressstore_nxv2f32(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1w { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv2f32( %vec, ptr align 4 %p, %mask) + ret void +} + +;; SVE vector types promoted to 32/64-bit (non-exhaustive) + +define void @test_compressstore_nxv2i8(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1b { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call 
void @llvm.masked.compressstore.nxv2i8( %vec, ptr align 1 %p, %mask) + ret void +} + +define void @test_compressstore_nxv4i16(ptr %p, %vec, %mask) { +; CHECK-LABEL: test_compressstore_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: cntp x8, p0, p0.s +; CHECK-NEXT: compact z0.s, p0, z0.s +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: st1h { z0.s }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.nxv4i16( %vec, ptr align 2 %p, %mask) + ret void +} + +;; NEON vector types (promoted to SVE) + +define void @test_compressstore_v2f64(ptr %p, <2 x double> %vec, <2 x i1> %mask) { +; CHECK-LABEL: test_compressstore_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ushll v1.2d, v1.2s, #0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: shl v1.2d, v1.2d, #63 +; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0 +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.v2f64(<2 x double> %vec, ptr align 8 %p, <2 x i1> %mask) + ret void +} + +define void @test_compressstore_v4i32(ptr %p, <4 x i32> %vec, <4 x i1> %mask) { +; CHECK-LABEL: test_compressstore_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: shl v1.4s, v1.4s, #31 +; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; CHECK-NEXT: cntp x8, p0, p0.s +; CHECK-NEXT: compact z0.s, p0, z0.s +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.v4i32(<4 x i32> %vec, ptr align 4 %p, <4 x i1> %mask) + ret void +} + +define void @test_compressstore_v2i64(ptr %p, <2 x i64> %vec, <2 x i1> %mask) { +; CHECK-LABEL: test_compressstore_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ushll v1.2d, v1.2s, #0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // 
kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: shl v1.2d, v1.2d, #63 +; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0 +; CHECK-NEXT: cntp x8, p0, p0.d +; CHECK-NEXT: compact z0.d, p0, z0.d +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + tail call void @llvm.masked.compressstore.v2i64(<2 x i64> %vec, ptr align 8 %p, <2 x i1> %mask) + ret void +} + +define void @test_compressstore_v8i32(ptr %p, <8 x i32> %vec, <8 x i1> %mask) { +; CHECK-BASE-LABEL: test_compressstore_v8i32: +; CHECK-BASE: // %bb.0: +; CHECK-BASE-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-BASE-NEXT: zip1 v3.8b, v2.8b, v0.8b +; CHECK-BASE-NEXT: adrp x8, .LCPI11_0 +; CHECK-BASE-NEXT: zip2 v2.8b, v2.8b, v0.8b +; CHECK-BASE-NEXT: ldr d5, [x8, :lo12:.LCPI11_0] +; CHECK-BASE-NEXT: ptrue p0.s +; CHECK-BASE-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-BASE-NEXT: ptrue p1.s, vl4 +; CHECK-BASE-NEXT: shl v4.4h, v3.4h, #15 +; CHECK-BASE-NEXT: ushll v2.4s, v2.4h, #0 +; CHECK-BASE-NEXT: ushll v3.4s, v3.4h, #0 +; CHECK-BASE-NEXT: cmlt v4.4h, v4.4h, #0 +; CHECK-BASE-NEXT: shl v2.4s, v2.4s, #31 +; CHECK-BASE-NEXT: shl v3.4s, v3.4s, #31 +; CHECK-BASE-NEXT: and v4.8b, v4.8b, v5.8b +; CHECK-BASE-NEXT: addv h4, v4.4h +; CHECK-BASE-NEXT: fmov w8, s4 +; CHECK-BASE-NEXT: and w8, w8, #0xf +; CHECK-BASE-NEXT: fmov s4, w8 +; CHECK-BASE-NEXT: cnt z4.s, p0/m, z4.s +; CHECK-BASE-NEXT: cmpne p0.s, p1/z, z2.s, #0 +; CHECK-BASE-NEXT: cmpne p1.s, p1/z, z3.s, #0 +; CHECK-BASE-NEXT: cntp x8, p0, p0.s +; CHECK-BASE-NEXT: compact z1.s, p0, z1.s +; CHECK-BASE-NEXT: compact z0.s, p1, z0.s +; CHECK-BASE-NEXT: cntp x9, p1, p1.s +; CHECK-BASE-NEXT: fmov w10, s4 +; CHECK-BASE-NEXT: whilelo p0.s, xzr, x8 +; CHECK-BASE-NEXT: whilelo p1.s, xzr, x9 +; CHECK-BASE-NEXT: st1w { z1.s }, p0, [x0, x10, lsl #2] +; CHECK-BASE-NEXT: st1w { z0.s }, p1, [x0] +; CHECK-BASE-NEXT: ret +; +; CHECK-VL256-LABEL: test_compressstore_v8i32: +; CHECK-VL256: // %bb.0: +; CHECK-VL256-NEXT: // kill: def $d2 killed 
$d2 def $z2 +; CHECK-VL256-NEXT: ptrue p0.s, vl8 +; CHECK-VL256-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-VL256-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-VL256-NEXT: uunpklo z2.h, z2.b +; CHECK-VL256-NEXT: ptrue p1.s, vl4 +; CHECK-VL256-NEXT: splice z0.s, p1, z0.s, z1.s +; CHECK-VL256-NEXT: uunpklo z2.s, z2.h +; CHECK-VL256-NEXT: lsl z2.s, z2.s, #31 +; CHECK-VL256-NEXT: asr z2.s, z2.s, #31 +; CHECK-VL256-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; CHECK-VL256-NEXT: cntp x8, p0, p0.s +; CHECK-VL256-NEXT: compact z0.s, p0, z0.s +; CHECK-VL256-NEXT: whilelo p0.s, xzr, x8 +; CHECK-VL256-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-VL256-NEXT: ret + tail call void @llvm.masked.compressstore.v8i32(<8 x i32> %vec, ptr align 4 %p, <8 x i1> %mask) + ret void +} + +define void @test_compressstore_v4i64(ptr %p, <4 x i64> %vec, <4 x i1> %mask) { +; CHECK-BASE-LABEL: test_compressstore_v4i64: +; CHECK-BASE: // %bb.0: +; CHECK-BASE-NEXT: ushll v2.4s, v2.4h, #0 +; CHECK-BASE-NEXT: index z4.s, #1, #1 +; CHECK-BASE-NEXT: ptrue p0.s +; CHECK-BASE-NEXT: ptrue p1.d, vl2 +; CHECK-BASE-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-BASE-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-BASE-NEXT: shl v3.2s, v2.2s, #31 +; CHECK-BASE-NEXT: cmlt v3.2s, v3.2s, #0 +; CHECK-BASE-NEXT: and v3.8b, v3.8b, v4.8b +; CHECK-BASE-NEXT: ushll2 v4.2d, v2.4s, #0 +; CHECK-BASE-NEXT: ushll v2.2d, v2.2s, #0 +; CHECK-BASE-NEXT: addp v3.2s, v3.2s, v3.2s +; CHECK-BASE-NEXT: shl v2.2d, v2.2d, #63 +; CHECK-BASE-NEXT: fmov w8, s3 +; CHECK-BASE-NEXT: shl v3.2d, v4.2d, #63 +; CHECK-BASE-NEXT: and w8, w8, #0x3 +; CHECK-BASE-NEXT: fmov s4, w8 +; CHECK-BASE-NEXT: cnt z4.s, p0/m, z4.s +; CHECK-BASE-NEXT: cmpne p0.d, p1/z, z3.d, #0 +; CHECK-BASE-NEXT: cmpne p1.d, p1/z, z2.d, #0 +; CHECK-BASE-NEXT: cntp x8, p0, p0.d +; CHECK-BASE-NEXT: compact z1.d, p0, z1.d +; CHECK-BASE-NEXT: compact z0.d, p1, z0.d +; CHECK-BASE-NEXT: cntp x9, p1, p1.d +; CHECK-BASE-NEXT: fmov w10, s4 +; CHECK-BASE-NEXT: whilelo p0.d, xzr, x8 
+; CHECK-BASE-NEXT: whilelo p1.d, xzr, x9 +; CHECK-BASE-NEXT: st1d { z1.d }, p0, [x0, x10, lsl #3] +; CHECK-BASE-NEXT: st1d { z0.d }, p1, [x0] +; CHECK-BASE-NEXT: ret +; +; CHECK-VL256-LABEL: test_compressstore_v4i64: +; CHECK-VL256: // %bb.0: +; CHECK-VL256-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-VL256-NEXT: ptrue p0.d, vl4 +; CHECK-VL256-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-VL256-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-VL256-NEXT: uunpklo z2.s, z2.h +; CHECK-VL256-NEXT: ptrue p1.d, vl2 +; CHECK-VL256-NEXT: splice z0.d, p1, z0.d, z1.d +; CHECK-VL256-NEXT: uunpklo z2.d, z2.s +; CHECK-VL256-NEXT: lsl z2.d, z2.d, #63 +; CHECK-VL256-NEXT: asr z2.d, z2.d, #63 +; CHECK-VL256-NEXT: cmpne p0.d, p0/z, z2.d, #0 +; CHECK-VL256-NEXT: cntp x8, p0, p0.d +; CHECK-VL256-NEXT: compact z0.d, p0, z0.d +; CHECK-VL256-NEXT: whilelo p0.d, xzr, x8 +; CHECK-VL256-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-VL256-NEXT: ret + tail call void @llvm.masked.compressstore.v4i64(<4 x i64> %vec, ptr align 8 %p, <4 x i1> %mask) + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll index ef7a13819a799..ebbeab94066d6 100644 --- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll +++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll @@ -541,9 +541,10 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg ; GFX908-NEXT: s_lshr_b32 s2, s0, 16 ; GFX908-NEXT: v_cvt_f32_f16_e32 v19, s2 ; GFX908-NEXT: s_lshl_b64 s[6:7], s[4:5], 5 -; GFX908-NEXT: v_mov_b32_e32 v0, 0 ; GFX908-NEXT: s_lshl_b64 s[14:15], s[10:11], 5 +; GFX908-NEXT: v_mov_b32_e32 v0, 0 ; GFX908-NEXT: s_and_b64 s[0:1], exec, s[0:1] +; GFX908-NEXT: s_or_b32 s14, s14, 28 ; GFX908-NEXT: s_lshl_b64 s[16:17], s[8:9], 5 ; GFX908-NEXT: v_mov_b32_e32 v1, 0 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -609,13 +610,13 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg ; 
GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: s_add_u32 s22, s20, s9 ; GFX908-NEXT: s_addc_u32 s23, s21, s13 -; GFX908-NEXT: global_load_dword v21, v17, s[22:23] offset:16 glc +; GFX908-NEXT: global_load_dword v21, v17, s[22:23] offset:-12 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v20, v17, s[22:23] offset:20 glc +; GFX908-NEXT: global_load_dword v20, v17, s[22:23] offset:-8 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v12, v17, s[22:23] offset:24 glc +; GFX908-NEXT: global_load_dword v12, v17, s[22:23] offset:-4 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v12, v17, s[22:23] offset:28 glc +; GFX908-NEXT: global_load_dword v12, v17, s[22:23] glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: ds_read_b64 v[12:13], v17 ; GFX908-NEXT: ds_read_b64 v[14:15], v0 @@ -709,6 +710,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg ; GFX90A-NEXT: s_lshl_b64 s[6:7], s[4:5], 5 ; GFX90A-NEXT: s_lshl_b64 s[14:15], s[10:11], 5 ; GFX90A-NEXT: s_and_b64 s[0:1], exec, s[0:1] +; GFX90A-NEXT: s_or_b32 s14, s14, 28 ; GFX90A-NEXT: s_lshl_b64 s[16:17], s[8:9], 5 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_readfirstlane_b32 s2, v18 @@ -769,13 +771,13 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: s_add_u32 s22, s20, s9 ; GFX90A-NEXT: s_addc_u32 s23, s21, s13 -; GFX90A-NEXT: global_load_dword v21, v19, s[22:23] offset:16 glc +; GFX90A-NEXT: global_load_dword v21, v19, s[22:23] offset:-12 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v20, v19, s[22:23] offset:20 glc +; GFX90A-NEXT: global_load_dword v20, v19, s[22:23] offset:-8 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v14, v19, s[22:23] offset:24 glc +; GFX90A-NEXT: global_load_dword v14, v19, s[22:23] offset:-4 glc ; GFX90A-NEXT: 
s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v14, v19, s[22:23] offset:28 glc +; GFX90A-NEXT: global_load_dword v14, v19, s[22:23] glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: ds_read_b64 v[14:15], v19 ; GFX90A-NEXT: ds_read_b64 v[16:17], v0 diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll index 1b447571efaf2..af1c64321222b 100644 --- a/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll +++ b/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=amdgcn -amdgpu-scalar-ir-passes=false < %s | FileCheck %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck %s ; Test for a bug where DAGCombiner::ReassociateOps() was creating adds ; with offset in the first operand and base pointers in the second. diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines-gfx1200.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines-gfx1200.ll index 7d1dfae5e46b2..62c62c9159bac 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines-gfx1200.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines-gfx1200.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -start-before=amdgpu-unify-divergent-exit-nodes -< %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SDAG %s -; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -start-before=amdgpu-unify-divergent-exit-nodes -< %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-GISEL %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SDAG %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-GISEL %s ; -------------------------------------------------------------------------------- ; fminimum tests diff --git a/llvm/test/CodeGen/AMDGPU/global-address.ll b/llvm/test/CodeGen/AMDGPU/global-address.ll index 
f3db48edbb6bb..c7d0ee98e0392 100644 --- a/llvm/test/CodeGen/AMDGPU/global-address.ll +++ b/llvm/test/CodeGen/AMDGPU/global-address.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -< %s | FileCheck -check-prefix=GFX11-PAL-SDAG %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -< %s | FileCheck -check-prefix=GFX11-PAL-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11-PAL-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11-PAL-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-PAL %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-PAL %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -< %s | FileCheck -check-prefix=GFX11-HSA %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -< %s | FileCheck -check-prefix=GFX11-HSA %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11-HSA %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11-HSA %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-HSA %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-HSA %s diff --git a/llvm/test/CodeGen/AMDGPU/idot2.ll b/llvm/test/CodeGen/AMDGPU/idot2.ll index bf65657ff841c..22907ca28c47f 100644 --- a/llvm/test/CodeGen/AMDGPU/idot2.ll +++ b/llvm/test/CodeGen/AMDGPU/idot2.ll @@ -2396,7 +2396,7 @@ define amdgpu_kernel void @udot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX9-NODL-NEXT: v_mul_u32_u24_e32 v4, v2, v1 ; GFX9-NODL-NEXT: 
s_waitcnt lgkmcnt(0) ; GFX9-NODL-NEXT: v_mad_u32_u24 v1, v2, v1, s0 -; GFX9-NODL-NEXT: v_add3_u32 v1, v1, v4, v3 +; GFX9-NODL-NEXT: v_add3_u32 v1, v4, v1, v3 ; GFX9-NODL-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-NODL-NEXT: s_endpgm ; @@ -2417,7 +2417,7 @@ define amdgpu_kernel void @udot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX9-DL-NEXT: v_mul_u32_u24_e32 v4, v2, v1 ; GFX9-DL-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-DL-NEXT: v_mad_u32_u24 v1, v2, v1, s0 -; GFX9-DL-NEXT: v_add3_u32 v1, v1, v4, v3 +; GFX9-DL-NEXT: v_add3_u32 v1, v4, v1, v3 ; GFX9-DL-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-DL-NEXT: s_endpgm ; @@ -2442,7 +2442,7 @@ define amdgpu_kernel void @udot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-DL-NEXT: v_mad_u32_u24 v0, v3, v0, s0 ; GFX10-DL-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-DL-NEXT: v_add3_u32 v0, v0, v2, v1 +; GFX10-DL-NEXT: v_add3_u32 v0, v2, v0, v1 ; GFX10-DL-NEXT: global_store_dword v3, v0, s[6:7] ; GFX10-DL-NEXT: s_endpgm ptr addrspace(1) %src2, @@ -2553,7 +2553,7 @@ define amdgpu_kernel void @idot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX9-NODL-NEXT: v_mul_i32_i24_e32 v4, v2, v1 ; GFX9-NODL-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NODL-NEXT: v_mad_i32_i24 v1, v2, v1, s0 -; GFX9-NODL-NEXT: v_add3_u32 v1, v1, v4, v3 +; GFX9-NODL-NEXT: v_add3_u32 v1, v4, v1, v3 ; GFX9-NODL-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-NODL-NEXT: s_endpgm ; @@ -2574,7 +2574,7 @@ define amdgpu_kernel void @idot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX9-DL-NEXT: v_mul_i32_i24_e32 v4, v2, v1 ; GFX9-DL-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-DL-NEXT: v_mad_i32_i24 v1, v2, v1, s0 -; GFX9-DL-NEXT: v_add3_u32 v1, v1, v4, v3 +; GFX9-DL-NEXT: v_add3_u32 v1, v4, v1, v3 ; GFX9-DL-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-DL-NEXT: s_endpgm ; @@ -2599,7 +2599,7 @@ define amdgpu_kernel void @idot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-DL-NEXT: v_mad_i32_i24 v0, v3, v0, 
s0 ; GFX10-DL-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-DL-NEXT: v_add3_u32 v0, v0, v2, v1 +; GFX10-DL-NEXT: v_add3_u32 v0, v2, v0, v1 ; GFX10-DL-NEXT: global_store_dword v3, v0, s[6:7] ; GFX10-DL-NEXT: s_endpgm ptr addrspace(1) %src2, diff --git a/llvm/test/CodeGen/AMDGPU/idot4s.ll b/llvm/test/CodeGen/AMDGPU/idot4s.ll index 1a22fa805a5a7..fd1f7b000472a 100644 --- a/llvm/test/CodeGen/AMDGPU/idot4s.ll +++ b/llvm/test/CodeGen/AMDGPU/idot4s.ll @@ -3268,19 +3268,19 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 ; GFX7-NEXT: s_mov_b32 s2, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) -; GFX7-NEXT: v_bfe_i32 v3, v2, 8, 8 ; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 8 +; GFX7-NEXT: v_bfe_i32 v3, v2, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v0 +; GFX7-NEXT: v_bfe_i32 v4, v2, 16, 8 ; GFX7-NEXT: v_bfe_u32 v6, v0, 8, 8 ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; GFX7-NEXT: v_bfe_i32 v4, v2, 16, 8 -; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v0 -; GFX7-NEXT: v_mul_u32_u24_e32 v3, v6, v3 +; GFX7-NEXT: v_mul_u32_u24_e32 v1, v1, v5 ; GFX7-NEXT: v_ashrrev_i32_e32 v2, 24, v2 ; GFX7-NEXT: v_bfe_u32 v7, v0, 16, 8 ; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX7-NEXT: v_mad_u32_u24 v1, v1, v5, v3 +; GFX7-NEXT: v_mad_u32_u24 v1, v6, v3, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_mad_u32_u24 v1, v7, v4, v1 ; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 @@ -3307,18 +3307,18 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX8-NEXT: v_mov_b32_e32 v0, s4 ; GFX8-NEXT: v_mov_b32_e32 v1, s5 ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v3 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3 -; GFX8-NEXT: v_bfe_i32 v6, v3, 0, 8 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_bfe_i32 v7, v7, 0, 8 ; GFX8-NEXT: 
v_bfe_i32 v5, v5, 0, 8 -; GFX8-NEXT: v_bfe_i32 v3, v3, 0, 8 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v2 -; GFX8-NEXT: v_and_b32_e32 v7, 0xff, v2 -; GFX8-NEXT: v_mul_lo_u16_sdwa v8, v9, sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v2 +; GFX8-NEXT: v_mul_lo_u16_sdwa v6, sext(v3), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 +; GFX8-NEXT: v_and_b32_e32 v8, 0xff, v8 ; GFX8-NEXT: v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_mad_u16 v6, v6, v7, v8 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_mad_u16 v6, v8, v7, v6 +; GFX8-NEXT: v_bfe_i32 v3, v3, 0, 8 ; GFX8-NEXT: v_mad_u16 v4, v4, v5, v6 ; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v2 ; GFX8-NEXT: v_mad_u16 v2, v3, v2, v4 @@ -3337,19 +3337,19 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX9-NODL-NEXT: s_movk_i32 s0, 0xff ; GFX9-NODL-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NODL-NEXT: s_waitcnt vmcnt(1) -; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 8, v1 ; GFX9-NODL-NEXT: s_waitcnt vmcnt(0) -; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v7, 8, v2 +; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v2 ; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v3, 16, v1 -; GFX9-NODL-NEXT: v_bfe_i32 v4, v1, 0, 8 -; GFX9-NODL-NEXT: v_and_b32_e32 v5, 0xff, v2 -; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v7, sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 -; GFX9-NODL-NEXT: v_and_b32_sdwa v8, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v4, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 +; GFX9-NODL-NEXT: v_bfe_i32 v5, v5, 0, 8 +; GFX9-NODL-NEXT: v_and_b32_e32 v6, 0xff, v6 +; GFX9-NODL-NEXT: v_and_b32_sdwa v7, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD ; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v1, 24, v1 ; GFX9-NODL-NEXT: v_bfe_i32 v3, v3, 0, 8 -; GFX9-NODL-NEXT: v_mad_legacy_u16 v4, v4, v5, v6 +; GFX9-NODL-NEXT: v_mad_legacy_u16 v4, v6, v5, v4 ; GFX9-NODL-NEXT: v_bfe_i32 v1, v1, 0, 8 -; GFX9-NODL-NEXT: v_mad_legacy_u16 v3, v8, v3, v4 +; GFX9-NODL-NEXT: v_mad_legacy_u16 v3, v7, v3, v4 ; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v2, 24, v2 ; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3 ; GFX9-NODL-NEXT: v_bfe_i32 v1, v1, 0, 16 @@ -3367,19 +3367,19 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX9-DL-NEXT: s_movk_i32 s0, 0xff ; GFX9-DL-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-DL-NEXT: s_waitcnt vmcnt(1) -; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v1 ; GFX9-DL-NEXT: s_waitcnt vmcnt(0) -; GFX9-DL-NEXT: v_lshrrev_b32_e32 v7, 8, v2 +; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v2 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v3, 16, v1 -; GFX9-DL-NEXT: v_bfe_i32 v4, v1, 0, 8 -; GFX9-DL-NEXT: v_and_b32_e32 v5, 0xff, v2 -; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v7, sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 -; GFX9-DL-NEXT: v_and_b32_sdwa v8, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v4, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 +; GFX9-DL-NEXT: v_bfe_i32 v5, v5, 0, 8 +; GFX9-DL-NEXT: v_and_b32_e32 v6, 0xff, v6 +; GFX9-DL-NEXT: v_and_b32_sdwa v7, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v1, 24, v1 ; GFX9-DL-NEXT: v_bfe_i32 v3, v3, 0, 8 -; GFX9-DL-NEXT: v_mad_legacy_u16 v4, v4, v5, v6 +; GFX9-DL-NEXT: v_mad_legacy_u16 v4, v6, v5, v4 ; GFX9-DL-NEXT: v_bfe_i32 v1, v1, 0, 8 -; GFX9-DL-NEXT: v_mad_legacy_u16 v3, v8, v3, v4 +; GFX9-DL-NEXT: v_mad_legacy_u16 v3, v7, v3, v4 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v2, 24, v2 ; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3 ; 
GFX9-DL-NEXT: v_bfe_i32 v1, v1, 0, 16 @@ -3392,28 +3392,28 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX10-DL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX10-DL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX10-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; GFX10-DL-NEXT: v_mov_b32_e32 v4, 0xff +; GFX10-DL-NEXT: v_mov_b32_e32 v6, 0xff ; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-DL-NEXT: s_clause 0x1 ; GFX10-DL-NEXT: global_load_dword v1, v0, s[0:1] ; GFX10-DL-NEXT: global_load_dword v2, v0, s[2:3] ; GFX10-DL-NEXT: s_waitcnt vmcnt(1) -; GFX10-DL-NEXT: v_lshrrev_b32_e32 v0, 8, v1 +; GFX10-DL-NEXT: v_bfe_i32 v0, v1, 0, 8 ; GFX10-DL-NEXT: s_waitcnt vmcnt(0) -; GFX10-DL-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX10-DL-NEXT: v_bfe_i32 v6, v1, 0, 8 -; GFX10-DL-NEXT: v_and_b32_e32 v7, 0xff, v2 -; GFX10-DL-NEXT: v_bfe_i32 v0, v0, 0, 8 -; GFX10-DL-NEXT: v_and_b32_e32 v3, 0xff, v3 +; GFX10-DL-NEXT: v_and_b32_e32 v3, 0xff, v2 +; GFX10-DL-NEXT: v_lshrrev_b32_e32 v4, 8, v1 +; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v2 +; GFX10-DL-NEXT: v_lshrrev_b32_e32 v7, 16, v1 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v1, 24, v1 -; GFX10-DL-NEXT: v_mul_lo_u16 v0, v3, v0 -; GFX10-DL-NEXT: v_and_b32_sdwa v3, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-DL-NEXT: v_bfe_i32 v4, v5, 0, 8 +; GFX10-DL-NEXT: v_mul_lo_u16 v0, v0, v3 +; GFX10-DL-NEXT: v_bfe_i32 v3, v4, 0, 8 +; GFX10-DL-NEXT: v_and_b32_e32 v4, 0xff, v5 +; GFX10-DL-NEXT: v_and_b32_sdwa v5, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-DL-NEXT: v_bfe_i32 v6, v7, 0, 8 ; GFX10-DL-NEXT: v_bfe_i32 v1, v1, 0, 8 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v2, 24, v2 -; GFX10-DL-NEXT: v_mad_u16 v0, v6, v7, v0 -; GFX10-DL-NEXT: v_mad_u16 v0, v3, v4, v0 +; GFX10-DL-NEXT: v_mad_u16 v0, v4, v3, v0 +; GFX10-DL-NEXT: v_mad_u16 v0, v5, v6, v0 ; GFX10-DL-NEXT: v_mad_u16 v0, v1, v2, v0 ; GFX10-DL-NEXT: v_mov_b32_e32 
v1, 0 ; GFX10-DL-NEXT: v_bfe_i32 v0, v0, 0, 16 @@ -3429,34 +3429,32 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX11-DL-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX11-DL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-DL-TRUE16-NEXT: s_clause 0x1 -; GFX11-DL-TRUE16-NEXT: global_load_b32 v3, v0, s[0:1] -; GFX11-DL-TRUE16-NEXT: global_load_b32 v4, v0, s[2:3] +; GFX11-DL-TRUE16-NEXT: global_load_b32 v2, v0, s[0:1] +; GFX11-DL-TRUE16-NEXT: global_load_b32 v3, v0, s[2:3] ; GFX11-DL-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 8, v3 +; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v2 +; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v1, v2, 0, 8 ; GFX11-DL-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v4 -; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v5, v3, 0, 8 -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v6.l, v3.h -; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v2, v0, 0, 8 -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-DL-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v1.l -; GFX11-DL-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.l -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.l +; GFX11-DL-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v3.l +; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v3 +; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v6.l, v2.h +; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v4, v4, 0, 8 +; GFX11-DL-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v3.h +; GFX11-DL-TRUE16-NEXT: v_mul_lo_u16 v0.l, v1.l, v0.l +; GFX11-DL-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v5.l ; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v5, v6, 0, 8 -; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 24, v3 -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-DL-TRUE16-NEXT: v_mul_lo_u16 v0.l, v0.l, v1.l -; GFX11-DL-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.h -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l -; GFX11-DL-TRUE16-NEXT: 
v_lshrrev_b32_e32 v4, 24, v4 -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v2.l, v0.h, v0.l -; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v2, v6, 0, 8 -; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v1.l, v3.l, v0.l +; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l +; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 24, v2 +; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 24, v3 +; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.l +; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v0.h, v1.l, v0.l +; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v4, v4, 0, 8 +; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v1.h, v2.l, v0.l ; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l -; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v1.l, v4.l, v0.l +; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l +; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v1.l, v3.l, v0.l ; GFX11-DL-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-DL-TRUE16-NEXT: v_bfe_i32 v0, v0, 0, 16 @@ -3475,25 +3473,24 @@ define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1, ; GFX11-DL-FAKE16-NEXT: global_load_b32 v1, v0, s[0:1] ; GFX11-DL-FAKE16-NEXT: global_load_b32 v0, v0, s[2:3] ; GFX11-DL-FAKE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 8, v1 +; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v2, v1, 0, 8 ; GFX11-DL-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1 -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v6, v1, 0, 8 -; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8 -; GFX11-DL-FAKE16-NEXT: 
v_and_b32_e32 v3, 0xff, v3 -; GFX11-DL-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v0 -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 24, v1 -; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 24, v0 -; GFX11-DL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) -; GFX11-DL-FAKE16-NEXT: v_mul_lo_u16 v2, v3, v2 +; GFX11-DL-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v0 +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 8, v1 +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v0 +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-DL-FAKE16-NEXT: v_mul_lo_u16 v2, v2, v3 ; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v3, v4, 0, 8 ; GFX11-DL-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v5 -; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8 -; GFX11-DL-FAKE16-NEXT: v_mad_u16 v2, v6, v7, v2 -; GFX11-DL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 24, v1 +; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v5, v6, 0, 8 +; GFX11-DL-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v7 +; GFX11-DL-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX11-DL-FAKE16-NEXT: v_mad_u16 v2, v4, v3, v2 +; GFX11-DL-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8 +; GFX11-DL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-DL-FAKE16-NEXT: v_mad_u16 v2, v6, v5, v2 ; GFX11-DL-FAKE16-NEXT: v_mad_u16 v0, v1, v0, v2 ; GFX11-DL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX11-DL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) diff --git a/llvm/test/CodeGen/AMDGPU/idot8u.ll b/llvm/test/CodeGen/AMDGPU/idot8u.ll index c03802e144d5b..dfc1e3c088129 100644 --- a/llvm/test/CodeGen/AMDGPU/idot8u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot8u.ll @@ -1684,7 +1684,7 @@ define amdgpu_kernel void @udot8_multiuses_mul1(ptr addrspace(1) %src1, ; GFX9-NEXT: v_mul_u32_u24_e32 v4, v4, v11 ; GFX9-NEXT: v_add3_u32 v2, v2, v7, v6 ; GFX9-NEXT: v_add3_u32 v2, v2, v5, v4 -; GFX9-NEXT: v_add3_u32 v1, v1, 
v17, v2 +; GFX9-NEXT: v_add3_u32 v1, v17, v1, v2 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-NEXT: s_endpgm ; @@ -1735,7 +1735,7 @@ define amdgpu_kernel void @udot8_multiuses_mul1(ptr addrspace(1) %src1, ; GFX9-DL-NEXT: v_mul_u32_u24_e32 v4, v4, v11 ; GFX9-DL-NEXT: v_add3_u32 v2, v2, v7, v6 ; GFX9-DL-NEXT: v_add3_u32 v2, v2, v5, v4 -; GFX9-DL-NEXT: v_add3_u32 v1, v1, v17, v2 +; GFX9-DL-NEXT: v_add3_u32 v1, v17, v1, v2 ; GFX9-DL-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-DL-NEXT: s_endpgm ; @@ -1789,7 +1789,7 @@ define amdgpu_kernel void @udot8_multiuses_mul1(ptr addrspace(1) %src1, ; GFX10-DL-NEXT: v_add3_u32 v0, v0, v6, v5 ; GFX10-DL-NEXT: v_add3_u32 v0, v0, v1, v2 ; GFX10-DL-NEXT: v_mov_b32_e32 v1, 0 -; GFX10-DL-NEXT: v_add3_u32 v0, v13, v3, v0 +; GFX10-DL-NEXT: v_add3_u32 v0, v3, v13, v0 ; GFX10-DL-NEXT: global_store_dword v1, v0, s[6:7] ; GFX10-DL-NEXT: s_endpgm ptr addrspace(1) %src2, diff --git a/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll b/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll index 4fef9624d8ad6..459615139d745 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll @@ -14,13 +14,13 @@ ; Use constant from different kernels ;. 
-; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t poison, align 4 -; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t poison, align 16 -; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t poison, align 16 -; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t poison, align 16 +; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t poison, align 4, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t poison, align 16, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t poison, align 16, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t poison, align 16, !absolute_symbol !0 ;. 
define amdgpu_kernel void @k0(i64 %x) { ; CHECK-LABEL: @k0( @@ -67,7 +67,7 @@ define amdgpu_kernel void @k3(i64 %x) { ; CHECK-LABEL: @k3( ; CHECK-NEXT: %1 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 16 ; CHECK-NEXT: %ptr1 = addrspacecast ptr addrspace(3) %1 to ptr -; CHECK-NEXT: store i64 1, ptr %ptr1, align 1 +; CHECK-NEXT: store i64 1, ptr %ptr1, align 16 ; CHECK-NEXT: %2 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 24 ; CHECK-NEXT: %ptr2 = addrspacecast ptr addrspace(3) %2 to ptr ; CHECK-NEXT: store i64 2, ptr %ptr2, align 8 @@ -98,9 +98,9 @@ define amdgpu_kernel void @k4(i64 %x) { ; Multiple constexpr use in a same instruction. define amdgpu_kernel void @k5() { ; CHECK-LABEL: @k5( -; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr -; CHECK-NEXT: %2 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr -; CHECK-NEXT: call void poison(ptr %1, ptr %2) +; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr +; CHECK-NEXT: call void poison(ptr %1, ptr %1) +; CHECK-NEXT: ret void ; call void poison(ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr), ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr)) ret void @@ -113,13 +113,22 @@ define amdgpu_kernel void @k5() { ; expression operands of store should be replaced by equivalent instruction sequences. 
define amdgpu_kernel void @k6() { ; CHECK-LABEL: @k6( - -; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 -; CHECK-NEXT: %2 = ptrtoint ptr addrspace(3) %1 to i32 -; CHECK-NEXT: %3 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 -; CHECK-NEXT: store i32 %2, ptr addrspace(3) %3, align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 +; CHECK-NEXT: %2 = ptrtoint ptr addrspace(3) %1 to i32 +; CHECK-NEXT: %3 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 +; CHECK-NEXT: store i32 %2, ptr addrspace(3) %3, align 8 +; CHECK-NEXT: ret void ; + store i32 ptrtoint (ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2) to i32), ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2) ret void } +;. +; CHECK: attributes #0 = { "amdgpu-lds-size"="2" } +; CHECK: attributes #1 = { "amdgpu-lds-size"="4" } +; CHECK: attributes #2 = { "amdgpu-lds-size"="32" } +; CHECK: attributes #3 = { "amdgpu-lds-size"="2020" } +; CHECK: attributes #4 = { "amdgpu-lds-size"="16" } +;. +; CHECK: !0 = !{i32 0, i32 1} +;. 
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll index a2761193c2d65..deb2d00e8bd81 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s ; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s @@ -9,73 +10,78 @@ @kern = addrspace(3) global float poison, align 4 ; @a_func is only used from a non-kernel function so is rewritten -; CHECK-NOT: @a_func ; @b_both is used from a non-kernel function so is rewritten -; CHECK-NOT: @b_both ; sorted both < func, so @b_both at null and @a_func at 4 @b_both = addrspace(3) global float poison, align 4 -; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4 -; CHECK: @llvm.amdgcn.kernel.timestwo.lds = internal addrspace(3) global %llvm.amdgcn.kernel.timestwo.lds.t poison, align 4 -; CHECK-LABEL: @get_func() -; CHECK: %0 = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr -; CHECK: %1 = ptrtoint ptr %0 to i64 -; CHECK: %2 = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr -; CHECK: %3 = ptrtoint ptr %2 to i64 -; CHECK: %4 = add i64 %1, %3 -; CHECK: %5 = inttoptr i64 %4 to ptr -; CHECK: %6 = load i32, ptr %5, align 4 -; CHECK: ret i32 %6 define i32 @get_func() local_unnamed_addr #0 { +; CHECK-LABEL: define i32 @get_func() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP1]] +; CHECK-NEXT: 
[[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +; CHECK-NEXT: ret i32 [[TMP4]] +; entry: %0 = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @a_func to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @a_func to ptr) to i64)) to ptr), align 4 ret i32 %0 } -; CHECK-LABEL: @set_func(i32 %x) -; CHECK: %0 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.module.lds.t, ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr -; CHECK: %1 = ptrtoint ptr %0 to i64 -; CHECK: %2 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.module.lds.t, ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr -; CHECK: %3 = ptrtoint ptr %2 to i64 -; CHECK: %4 = add i64 %1, %3 -; CHECK: %5 = inttoptr i64 %4 to ptr -; CHECK: store i32 %x, ptr %5, align 4 -; CHECK: ret void define void @set_func(i32 %x) { +; CHECK-LABEL: define void @set_func( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: store i32 [[X]], ptr [[TMP3]], align 4 +; CHECK-NEXT: ret void +; entry: store i32 %x, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64)) to ptr), align 4 ret void } -; CHECK-LABEL: @timestwo() #0 -; CHECK-NOT: call void @llvm.donothing() -; CHECK: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr -; CHECK: %2 = ptrtoint ptr %1 to i64 -; CHECK: %3 = addrspacecast ptr addrspace(3) getelementptr inbounds 
(%llvm.amdgcn.kernel.timestwo.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr -; CHECK: %4 = ptrtoint ptr %3 to i64 -; CHECK: %5 = add i64 %2, %4 -; CHECK: %6 = inttoptr i64 %5 to ptr -; CHECK: %ld = load i32, ptr %6, align 4 -; CHECK: %mul = mul i32 %ld, 2 -; CHECK: %7 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.kernel.timestwo.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr -; CHECK: %8 = ptrtoint ptr %7 to i64 -; CHECK: %9 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr -; CHECK: %10 = ptrtoint ptr %9 to i64 -; CHECK: %11 = add i64 %8, %10 -; CHECK: %12 = inttoptr i64 %11 to ptr -; CHECK: store i32 %mul, ptr %12, align 4 -; CHECK: ret void define amdgpu_kernel void @timestwo() { +; CHECK-LABEL: define amdgpu_kernel void @timestwo( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TIMESTWO_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP3]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP2]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TIMESTWO_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP7]] to i64 +; CHECK-NEXT: [[TMP9:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr +; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 
[[TMP8]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP12]], align 4 +; CHECK-NEXT: ret void +; %ld = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @kern to ptr) to i64)) to ptr), align 4 %mul = mul i32 %ld, 2 store i32 %mul, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @kern to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64)) to ptr), align 4 ret void } -; CHECK-LABEL: @through_functions() #0 define amdgpu_kernel void @through_functions() { +; CHECK-LABEL: define amdgpu_kernel void @through_functions( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ] +; CHECK-NEXT: [[LD:%.*]] = call i32 @get_func() +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 4 +; CHECK-NEXT: call void @set_func(i32 [[MUL]]) +; CHECK-NEXT: ret void +; %ld = call i32 @get_func() %mul = mul i32 %ld, 4 call void @set_func(i32 %mul) diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll index 1156f2718cf1e..3329c9a761900 100644 --- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll @@ -365,110 +365,107 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX8-NEXT: s_waitcnt lgkmcnt(0) ; GFX8-NEXT: s_swappc_b64 s[30:31], s[4:5] ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 17, v0 -; GFX8-NEXT: v_and_b32_e32 v10, 0xfe000000, v1 +; GFX8-NEXT: v_and_b32_e32 v12, 0xfe000000, v1 ; GFX8-NEXT: v_mov_b32_e32 v1, 3 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX8-NEXT: v_or_b32_e32 v0, v12, v0 ; GFX8-NEXT: v_mov_b32_e32 v1, s35 ; 
GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v0 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: s_movk_i32 s0, 0x2800 +; GFX8-NEXT: s_movk_i32 s0, 0x5000 ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 -; GFX8-NEXT: v_mov_b32_e32 v6, 0 +; GFX8-NEXT: v_mov_b32_e32 v10, 0 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: v_mov_b32_e32 v7, 0 -; GFX8-NEXT: v_mov_b32_e32 v11, 0x7f -; GFX8-NEXT: s_movk_i32 s1, 0x800 -; GFX8-NEXT: s_movk_i32 s2, 0x1000 -; GFX8-NEXT: s_movk_i32 s3, 0x1800 -; GFX8-NEXT: s_movk_i32 s4, 0x2000 +; GFX8-NEXT: v_mov_b32_e32 v11, 0 +; GFX8-NEXT: v_mov_b32_e32 v13, 0x7f ; GFX8-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: s_mov_b32 s5, 0 +; GFX8-NEXT: s_mov_b32 s0, 0 ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: .LBB1_2: ; %for.body ; GFX8-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0xffffd800, v2 -; GFX8-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[2:3] -; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[8:9] -; GFX8-NEXT: v_add_u32_e32 v12, vcc, 0xffffe000, v2 -; GFX8-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[12:13], v[12:13] -; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0xffffe800, v2 -; GFX8-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[8:9] -; GFX8-NEXT: v_add_u32_e32 v16, vcc, 0xfffff000, v2 -; GFX8-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc -; GFX8-NEXT: v_add_u32_e32 v20, vcc, 0xfffff800, v2 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffb000, v2 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[4:5] +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffb800, v2 +; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[6:7] +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 
0xffffc000, v2 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[4:5] +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffc800, v2 +; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffd000, v2 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v20, vcc, 0xffffd800, v2 ; GFX8-NEXT: v_addc_u32_e32 v21, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[16:17] -; GFX8-NEXT: v_add_u32_e32 v16, vcc, s1, v2 -; GFX8-NEXT: v_addc_u32_e32 v17, vcc, 0, v3, vcc -; GFX8-NEXT: s_addk_i32 s5, 0x2000 -; GFX8-NEXT: s_cmp_gt_u32 s5, 0x3fffff -; GFX8-NEXT: s_waitcnt vmcnt(3) -; GFX8-NEXT: v_add_u32_e32 v22, vcc, v14, v6 -; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v15, v7, vcc -; GFX8-NEXT: v_add_u32_e32 v6, vcc, s2, v2 -; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[20:21] -; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[16:17] -; GFX8-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc -; GFX8-NEXT: v_add_u32_e32 v20, vcc, s3, v2 -; GFX8-NEXT: v_addc_u32_e32 v21, vcc, 0, v3, vcc -; GFX8-NEXT: s_waitcnt vmcnt(4) -; GFX8-NEXT: v_add_u32_e32 v22, vcc, v12, v22 -; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v13, v23, vcc -; GFX8-NEXT: v_add_u32_e32 v12, vcc, s4, v2 ; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7] -; GFX8-NEXT: flat_load_dwordx2 v[20:21], v[20:21] -; GFX8-NEXT: v_addc_u32_e32 v13, vcc, 0, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v22, vcc, 0xffffe000, v2 +; GFX8-NEXT: v_addc_u32_e32 v23, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[4:5] +; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[20:21] +; GFX8-NEXT: s_addk_i32 s0, 0x2000 +; GFX8-NEXT: s_cmp_gt_u32 s0, 0x3fffff ; GFX8-NEXT: s_waitcnt vmcnt(5) +; GFX8-NEXT: v_add_u32_e32 v24, vcc, v14, v10 +; GFX8-NEXT: v_addc_u32_e32 v25, vcc, v15, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xffffe800, v2 +; GFX8-NEXT: v_addc_u32_e32 v11, vcc, -1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v14, vcc, 0xfffff000, v2 +; GFX8-NEXT: flat_load_dwordx2 v[20:21], 
v[22:23] +; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11] +; GFX8-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc +; GFX8-NEXT: s_waitcnt vmcnt(6) +; GFX8-NEXT: v_add_u32_e32 v22, vcc, v16, v24 +; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v17, v25, vcc +; GFX8-NEXT: v_add_u32_e32 v16, vcc, 0xfffff800, v2 +; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[14:15] +; GFX8-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[16:17] +; GFX8-NEXT: s_waitcnt vmcnt(7) ; GFX8-NEXT: v_add_u32_e32 v22, vcc, v18, v22 ; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v19, v23, vcc -; GFX8-NEXT: v_add_u32_e32 v18, vcc, s0, v2 -; GFX8-NEXT: flat_load_dwordx2 v[12:13], v[12:13] -; GFX8-NEXT: v_addc_u32_e32 v19, vcc, 0, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[18:19] +; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[2:3] ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x10000, v2 ; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc +; GFX8-NEXT: s_waitcnt vmcnt(7) +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v22 +; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v23, vcc ; GFX8-NEXT: s_waitcnt vmcnt(6) -; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v22 -; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v9, v23, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v8, v6 +; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v9, v7, vcc ; GFX8-NEXT: s_waitcnt vmcnt(5) -; GFX8-NEXT: v_add_u32_e32 v8, vcc, v14, v8 -; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v15, v9, vcc -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v9, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc ; GFX8-NEXT: s_waitcnt vmcnt(4) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v16, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v17, v5, vcc -; GFX8-NEXT: s_waitcnt vmcnt(3) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc -; GFX8-NEXT: s_waitcnt vmcnt(2) ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v20, v4 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v21, v5, vcc +; GFX8-NEXT: s_waitcnt 
vmcnt(3) +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v10, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v11, v5, vcc +; GFX8-NEXT: s_waitcnt vmcnt(2) +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v14, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v13, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v16, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v17, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v18, v4 -; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v19, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v18, v4 +; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v19, v5, vcc ; GFX8-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX8-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX8-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX8-NEXT: v_subrev_u32_e32 v11, vcc, 1, v11 +; GFX8-NEXT: v_subrev_u32_e32 v13, vcc, 1, v13 ; GFX8-NEXT: s_and_b64 vcc, exec, vcc ; GFX8-NEXT: s_cbranch_vccz .LBB1_1 ; GFX8-NEXT: ; %bb.4: ; %while.end ; GFX8-NEXT: v_mov_b32_e32 v1, s35 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v10 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v12 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[6:7] +; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[10:11] ; GFX8-NEXT: s_endpgm ; ; GFX900-LABEL: clmem_read: @@ -498,76 +495,79 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX900-NEXT: v_mov_b32_e32 v1, s35 ; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x2800, v0 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0 ; GFX900-NEXT: v_mov_b32_e32 v4, 0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX900-NEXT: v_mov_b32_e32 v5, 0 ; GFX900-NEXT: v_mov_b32_e32 v7, 0x7f -; GFX900-NEXT: s_movk_i32 s2, 0xf000 -; GFX900-NEXT: s_movk_i32 s3, 0x1000 -; GFX900-NEXT: s_movk_i32 s4, 0x2000 +; GFX900-NEXT: s_movk_i32 s2, 0xd000 +; 
GFX900-NEXT: s_movk_i32 s3, 0xe000 +; GFX900-NEXT: s_movk_i32 s4, 0xf000 ; GFX900-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX900-NEXT: ; =>This Loop Header: Depth=1 ; GFX900-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX900-NEXT: v_mov_b32_e32 v3, v1 -; GFX900-NEXT: v_mov_b32_e32 v2, v0 ; GFX900-NEXT: s_mov_b32 s5, 0 +; GFX900-NEXT: v_mov_b32_e32 v2, v0 ; GFX900-NEXT: .LBB1_2: ; %for.body ; GFX900-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX900-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffe000, v2 +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v2 ; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v3, vcc -; GFX900-NEXT: global_load_dwordx2 v[14:15], v[8:9], off offset:-2048 +; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off +; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc +; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 +; GFX900-NEXT: global_load_dwordx2 v[20:21], v[14:15], off +; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, s2, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v3, vcc +; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, s3, v2 +; GFX900-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048 +; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc ; GFX900-NEXT: global_load_dwordx2 v[10:11], v[2:3], off offset:-4096 ; GFX900-NEXT: global_load_dwordx2 v[12:13], v[2:3], off offset:-2048 ; GFX900-NEXT: s_addk_i32 s5, 0x2000 ; GFX900-NEXT: s_cmp_gt_u32 s5, 0x3fffff -; GFX900-NEXT: s_waitcnt vmcnt(2) -; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v14, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v5, vcc -; GFX900-NEXT: global_load_dwordx2 v[4:5], v[2:3], off -; GFX900-NEXT: global_load_dwordx2 v[14:15], v[8:9], off -; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v14, v16 -; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v17, vcc -; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, s2, v2 -; GFX900-NEXT: 
v_addc_co_u32_e32 v9, vcc, -1, v3, vcc -; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off offset:-2048 -; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v8, v14 -; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v9, v15, vcc -; GFX900-NEXT: global_load_dwordx2 v[8:9], v[2:3], off offset:2048 -; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v10, v14 -; GFX900-NEXT: v_addc_co_u32_e32 v11, vcc, v11, v15, vcc -; GFX900-NEXT: v_add_co_u32_e64 v14, s[0:1], v12, v14 -; GFX900-NEXT: v_addc_co_u32_e64 v15, s[0:1], v13, v11, s[0:1] -; GFX900-NEXT: v_add_co_u32_e32 v10, vcc, s3, v2 -; GFX900-NEXT: v_add_co_u32_e64 v12, s[0:1], s4, v2 -; GFX900-NEXT: v_addc_co_u32_e32 v11, vcc, 0, v3, vcc -; GFX900-NEXT: v_addc_co_u32_e64 v13, vcc, 0, v3, s[0:1] -; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v4, v14 -; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v5, v15, vcc -; GFX900-NEXT: global_load_dwordx2 v[4:5], v[12:13], off offset:-4096 -; GFX900-NEXT: global_load_dwordx2 v[14:15], v[10:11], off offset:2048 -; GFX900-NEXT: s_waitcnt vmcnt(2) -; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v8, v16 -; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v9, v17, vcc -; GFX900-NEXT: global_load_dwordx2 v[8:9], v[12:13], off -; GFX900-NEXT: global_load_dwordx2 v[10:11], v[12:13], off offset:2048 +; GFX900-NEXT: s_waitcnt vmcnt(5) +; GFX900-NEXT: v_add_co_u32_e32 v22, vcc, v8, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc +; GFX900-NEXT: global_load_dwordx2 v[8:9], v[14:15], off offset:-4096 +; GFX900-NEXT: s_waitcnt vmcnt(5) +; GFX900-NEXT: v_add_co_u32_e64 v24, s[0:1], v18, v22 +; GFX900-NEXT: v_addc_co_u32_e64 v25, s[0:1], v19, v5, s[0:1] +; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 +; GFX900-NEXT: global_load_dwordx2 v[22:23], v[14:15], off +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s4, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048 +; GFX900-NEXT: s_waitcnt vmcnt(7) +; 
GFX900-NEXT: v_add_co_u32_e32 v20, vcc, v20, v24 +; GFX900-NEXT: global_load_dwordx2 v[14:15], v[2:3], off +; GFX900-NEXT: v_addc_co_u32_e32 v21, vcc, v21, v25, vcc ; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, 0x10000, v2 ; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX900-NEXT: s_waitcnt vmcnt(7) +; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v16, v20 +; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v21, vcc +; GFX900-NEXT: s_waitcnt vmcnt(4) +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v8, v16 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v17, vcc ; GFX900-NEXT: s_waitcnt vmcnt(3) -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v16 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v17, vcc +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v18, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v19, v9, vcc ; GFX900-NEXT: s_waitcnt vmcnt(2) -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v22, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v23, v9, vcc ; GFX900-NEXT: s_waitcnt vmcnt(1) -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc -; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v9, vcc ; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 ; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc ; GFX900-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX900-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX900-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -610,7 +610,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 3, v6 ; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0 ; 
GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0 -; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x2800, v0 +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 ; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX10-NEXT: ; =>This Loop Header: Depth=1 @@ -621,30 +621,29 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX10-NEXT: .LBB1_2: ; %for.body ; GFX10-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffe000 +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffb800 ; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v4, 0xfffff000 +; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v4, 0xffffc800 ; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: s_clause 0x5 +; GFX10-NEXT: v_add_co_u32 v14, vcc_lo, v4, 0xffffd800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v18, vcc_lo, v4, 0xffffe800 +; GFX10-NEXT: s_clause 0x2 ; GFX10-NEXT: global_load_dwordx2 v[12:13], v[8:9], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[14:15], v[8:9], off ; GFX10-NEXT: global_load_dwordx2 v[16:17], v[10:11], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[18:19], v[10:11], off -; GFX10-NEXT: global_load_dwordx2 v[20:21], v[4:5], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[22:23], v[4:5], off -; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0x1000 -; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v4, 0x2000 -; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo -; GFX10-NEXT: global_load_dwordx2 v[24:25], v[8:9], off offset:-2048 -; GFX10-NEXT: v_add_co_u32 v26, vcc_lo, 0x2800, v4 -; GFX10-NEXT: s_clause 0x1 -; GFX10-NEXT: global_load_dwordx2 v[28:29], v[10:11], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[20:21], v[14:15], off 
offset:-2048 +; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v23, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: s_clause 0x7 +; GFX10-NEXT: global_load_dwordx2 v[24:25], v[18:19], off offset:-2048 ; GFX10-NEXT: global_load_dwordx2 v[8:9], v[8:9], off -; GFX10-NEXT: v_add_co_ci_u32_e32 v27, vcc_lo, 0, v5, vcc_lo -; GFX10-NEXT: s_clause 0x1 -; GFX10-NEXT: global_load_dwordx2 v[30:31], v[10:11], off -; GFX10-NEXT: global_load_dwordx2 v[32:33], v[26:27], off +; GFX10-NEXT: global_load_dwordx2 v[10:11], v[10:11], off +; GFX10-NEXT: global_load_dwordx2 v[14:15], v[14:15], off +; GFX10-NEXT: global_load_dwordx2 v[26:27], v[18:19], off +; GFX10-NEXT: global_load_dwordx2 v[28:29], v[22:23], off +; GFX10-NEXT: global_load_dwordx2 v[30:31], v[4:5], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[32:33], v[4:5], off ; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 ; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo ; GFX10-NEXT: s_addk_i32 s1, 0x2000 @@ -652,27 +651,25 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX10-NEXT: s_waitcnt vmcnt(10) ; GFX10-NEXT: v_add_co_u32 v2, s0, v12, v2 ; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v13, v3, s0 -; GFX10-NEXT: s_waitcnt vmcnt(9) -; GFX10-NEXT: v_add_co_u32 v2, s0, v14, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v15, v3, s0 -; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_add_co_u32 v2, s0, v8, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v9, v3, s0 ; GFX10-NEXT: v_add_co_u32 v2, s0, v16, v2 ; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v17, v3, s0 -; GFX10-NEXT: s_waitcnt vmcnt(7) -; GFX10-NEXT: v_add_co_u32 v2, s0, v18, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v19, v3, s0 -; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: v_add_co_u32 v2, s0, v10, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v11, v3, s0 ; 
GFX10-NEXT: v_add_co_u32 v2, s0, v20, v2 ; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v21, v3, s0 -; GFX10-NEXT: s_waitcnt vmcnt(5) -; GFX10-NEXT: v_add_co_u32 v2, s0, v22, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v23, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: v_add_co_u32 v2, s0, v14, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v15, v3, s0 ; GFX10-NEXT: v_add_co_u32 v2, s0, v24, v2 ; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v25, v3, s0 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_add_co_u32 v2, s0, v26, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v27, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(2) -; GFX10-NEXT: v_add_co_u32 v2, s0, v8, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v9, v3, s0 ; GFX10-NEXT: v_add_co_u32 v2, s0, v28, v2 ; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v29, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(1) @@ -720,76 +717,78 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX90A-NEXT: v_mov_b32_e32 v2, s35 ; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x2800, v1 +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x5000, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0 +; GFX90A-NEXT: s_movk_i32 s0, 0xd000 +; GFX90A-NEXT: s_movk_i32 s1, 0xe000 ; GFX90A-NEXT: s_movk_i32 s2, 0xf000 -; GFX90A-NEXT: s_movk_i32 s3, 0x1000 -; GFX90A-NEXT: s_movk_i32 s4, 0x2000 ; GFX90A-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB1_2 Depth 2 +; GFX90A-NEXT: s_mov_b32 s3, 0 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: s_mov_b32 s5, 0 ; GFX90A-NEXT: .LBB1_2: ; %for.body ; GFX90A-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX90A-NEXT: v_add_co_u32_e64 v18, s[0:1], s3, v6 -; GFX90A-NEXT: v_addc_co_u32_e64 v19, 
s[0:1], 0, v7, s[0:1] -; GFX90A-NEXT: v_add_co_u32_e64 v20, s[0:1], s4, v6 -; GFX90A-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffe000, v6 -; GFX90A-NEXT: v_addc_co_u32_e64 v21, s[0:1], 0, v7, s[0:1] -; GFX90A-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v7, vcc -; GFX90A-NEXT: global_load_dwordx2 v[24:25], v[20:21], off offset:-4096 -; GFX90A-NEXT: global_load_dwordx2 v[26:27], v[20:21], off -; GFX90A-NEXT: global_load_dwordx2 v[28:29], v[8:9], off offset:-2048 -; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[8:9], off +; GFX90A-NEXT: v_add_co_u32_e32 v12, vcc, 0xffffb000, v6 +; GFX90A-NEXT: v_addc_co_u32_e32 v13, vcc, -1, v7, vcc +; GFX90A-NEXT: global_load_dwordx2 v[12:13], v[12:13], off +; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v6 +; GFX90A-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v7, vcc +; GFX90A-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 +; GFX90A-NEXT: global_load_dwordx2 v[20:21], v[14:15], off +; GFX90A-NEXT: v_add_co_u32_e32 v16, vcc, s0, v6 +; GFX90A-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v7, vcc +; GFX90A-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048 +; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, s1, v6 +; GFX90A-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v7, vcc +; GFX90A-NEXT: global_load_dwordx2 v[24:25], v[14:15], off offset:-4096 +; GFX90A-NEXT: global_load_dwordx2 v[26:27], v[14:15], off offset:-2048 +; GFX90A-NEXT: global_load_dwordx2 v[28:29], v[14:15], off ; GFX90A-NEXT: v_add_co_u32_e32 v22, vcc, s2, v6 ; GFX90A-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v7, vcc -; GFX90A-NEXT: global_load_dwordx2 v[8:9], v[22:23], off offset:-2048 -; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: global_load_dwordx2 v[18:19], v[18:19], off offset:2048 -; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: global_load_dwordx2 v[20:21], v[20:21], off offset:2048 -; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[6:7], off offset:-4096 -; GFX90A-NEXT: global_load_dwordx2 v[12:13], v[6:7], off offset:-2048 -; GFX90A-NEXT: global_load_dwordx2 
v[14:15], v[6:7], off -; GFX90A-NEXT: global_load_dwordx2 v[16:17], v[6:7], off offset:2048 +; GFX90A-NEXT: global_load_dwordx2 v[14:15], v[22:23], off offset:-2048 +; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[6:7], off +; GFX90A-NEXT: global_load_dwordx2 v[8:9], v[6:7], off offset:-4096 +; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[6:7], off offset:-2048 ; GFX90A-NEXT: v_add_co_u32_e32 v6, vcc, 0x10000, v6 ; GFX90A-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc -; GFX90A-NEXT: s_addk_i32 s5, 0x2000 -; GFX90A-NEXT: s_cmp_gt_u32 s5, 0x3fffff -; GFX90A-NEXT: s_waitcnt vmcnt(8) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(7) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(6) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(3) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(2) +; GFX90A-NEXT: s_addk_i32 s3, 0x2000 +; GFX90A-NEXT: s_cmp_gt_u32 s3, 0x3fffff +; GFX90A-NEXT: s_waitcnt vmcnt(10) ; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 ; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(1) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc -; GFX90A-NEXT: s_waitcnt vmcnt(0) +; GFX90A-NEXT: s_waitcnt vmcnt(9) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(8) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(7) ; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v16, v4 ; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v17, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(6) ; 
GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v24, v4 ; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v25, v5, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(5) ; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v26, v4 ; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v27, v5, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(4) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(3) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(1) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc +; GFX90A-NEXT: s_waitcnt vmcnt(0) +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc ; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX90A-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX90A-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -824,7 +823,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0 ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x2800, v0 +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX11-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX11-NEXT: ; =>This Loop Header: Depth=1 @@ -836,74 +835,76 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX11-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, 0xffffe000, v4 +; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffc000 ; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v5, vcc_lo -; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, 0xfffff000, v4 +; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, 0xffffc000, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v5, vcc_lo -; GFX11-NEXT: global_load_b64 v[12:13], v[8:9], off offset:-2048 -; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v4, 0x2000 -; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v5, vcc_lo -; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, 0x1000, v4 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v5, vcc_lo -; GFX11-NEXT: global_load_b64 v[26:27], v[22:23], off offset:-4096 -; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, 0x2000, v4 -; GFX11-NEXT: s_clause 0x6 -; GFX11-NEXT: global_load_b64 v[24:25], v[24:25], off offset:2048 -; GFX11-NEXT: global_load_b64 v[8:9], v[8:9], off +; GFX11-NEXT: global_load_b64 v[14:15], v[8:9], off offset:-4096 +; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, 0xffffd000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v4, 0xffffe000 ; GFX11-NEXT: global_load_b64 v[10:11], v[10:11], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[14:15], v[4:5], off offset:-4096 -; GFX11-NEXT: global_load_b64 v[16:17], v[4:5], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[18:19], v[4:5], off -; GFX11-NEXT: global_load_b64 v[20:21], v[4:5], off offset:2048 -; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v5, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, -1, v5, vcc_lo +; GFX11-NEXT: global_load_b64 v[12:13], v[12:13], off offset:-2048 +; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, 0xffffe000, v4 ; GFX11-NEXT: s_clause 0x1 -; GFX11-NEXT: global_load_b64 v[22:23], v[22:23], off -; GFX11-NEXT: 
global_load_b64 v[28:29], v[28:29], off offset:2048 +; GFX11-NEXT: global_load_b64 v[20:21], v[16:17], off offset:-4096 +; GFX11-NEXT: global_load_b64 v[8:9], v[8:9], off +; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, -1, v5, vcc_lo +; GFX11-NEXT: s_clause 0x5 +; GFX11-NEXT: global_load_b64 v[18:19], v[18:19], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[16:17], v[16:17], off +; GFX11-NEXT: global_load_b64 v[22:23], v[22:23], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[24:25], v[4:5], off offset:-4096 +; GFX11-NEXT: global_load_b64 v[26:27], v[4:5], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[28:29], v[4:5], off ; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo ; GFX11-NEXT: s_addk_i32 s1, 0x2000 ; GFX11-NEXT: s_cmp_gt_u32 s1, 0x3fffff ; GFX11-NEXT: s_waitcnt vmcnt(10) -; GFX11-NEXT: v_add_co_u32 v2, s0, v12, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v13, v3, s0 -; GFX11-NEXT: s_waitcnt vmcnt(7) -; GFX11-NEXT: v_add_co_u32 v2, s0, v8, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v14, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, s0 -; GFX11-NEXT: s_waitcnt vmcnt(6) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v15, v3, s0 +; GFX11-NEXT: s_waitcnt vmcnt(9) ; GFX11-NEXT: v_add_co_u32 v2, s0, v10, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v11, v3, s0 +; GFX11-NEXT: s_waitcnt vmcnt(6) +; GFX11-NEXT: v_add_co_u32 v2, s0, v8, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v12, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v13, v3, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v20, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v21, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(5) -; GFX11-NEXT: v_add_co_u32 v2, s0, v14, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v18, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v15, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v19, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(4) ; GFX11-NEXT: v_add_co_u32 v2, s0, v16, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v17, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(3) -; GFX11-NEXT: v_add_co_u32 v2, s0, v18, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v22, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v19, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v23, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(2) -; GFX11-NEXT: v_add_co_u32 v2, s0, v20, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v21, v3, s0 -; GFX11-NEXT: v_add_co_u32 v2, s0, v26, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v27, v3, s0 ; GFX11-NEXT: v_add_co_u32 v2, s0, v24, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v25, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(1) -; GFX11-NEXT: v_add_co_u32 v2, s0, v22, 
v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v26, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v23, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v27, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v28, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) diff --git a/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll b/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll new file mode 100644 index 0000000000000..35a9bee03411f --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-lower-module-lds %s -o - | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-lower-module-lds %s -o - | FileCheck %s + +@lds = internal unnamed_addr addrspace(3) global [6144 x half] poison, align 2 + +define amdgpu_kernel void @test(ptr addrspace(1) %out) { +; CHECK-LABEL: define amdgpu_kernel void @test( +; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: switch i32 0, label %[[BB_3:.*]] [ +; CHECK-NEXT: i32 18, label %[[BB_2:.*]] +; CHECK-NEXT: i32 1, label %[[BB_2]] +; CHECK-NEXT: i32 0, label %[[BB_3]] +; CHECK-NEXT: ] +; CHECK: [[BB_1:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.test.lds to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: switch i32 0, label %[[BB_3]] [ +; CHECK-NEXT: i32 18, label %[[BB_2]] +; CHECK-NEXT: i32 1, label %[[BB_2]] +; CHECK-NEXT: i32 0, label %[[BB_3]] +; CHECK-NEXT: ] +; CHECK: [[BB_2]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[TMP1]], %[[BB_1]] ], [ [[TMP1]], %[[BB_1]] ], [ 10, %[[ENTRY]] ], [ 10, %[[ENTRY]] ] +; CHECK-NEXT: store i64 [[PHI]], 
ptr addrspace(1) [[OUT]], align 8 +; CHECK-NEXT: br label %[[BB_3]] +; CHECK: [[BB_3]]: +; CHECK-NEXT: ret void +; +entry: + switch i32 0, label %bb.3 [ + i32 18, label %bb.2 + i32 1, label %bb.2 + i32 0, label %bb.3 + ] +bb.1: + switch i32 0, label %bb.3 [ + i32 18, label %bb.2 + i32 1, label %bb.2 + i32 0, label %bb.3 + ] + +bb.2: + %phi = phi i64 [ ptrtoint (ptr addrspacecast (ptr addrspace(3) @lds to ptr) to i64), %bb.1 ], [ ptrtoint (ptr addrspacecast (ptr addrspace(3) @lds to ptr) to i64), %bb.1 ], [10, %entry], [10, %entry] + store i64 %phi, ptr addrspace(1) %out, align 8 + br label %bb.3 + +bb.3: + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll index c0c1763d54cc0..67dae136afb72 100644 --- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll +++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll @@ -146,7 +146,7 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ASHR_I32_5:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_4]], 31, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_4]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.276, align 8, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.275, align 8, addrspace 4) ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 576, 0 :: (invariant load (s128) from %ir.159, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], 
[[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) @@ -169,7 +169,7 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM4]], -467, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.285, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.284, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM16]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM17]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.207, addrspace 4) @@ -190,20 +190,20 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ADD_I32_15:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM5]], -468, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], 
implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.296, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.295, addrspace 4) ; CHECK-NEXT: [[COPY17:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]] ; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM2]].sub1, 65535, implicit-def dead $scc ; CHECK-NEXT: [[COPY17:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0 ; CHECK-NEXT: [[COPY17:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_1]] ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY17]], 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.259, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.258, addrspace 4) ; CHECK-NEXT: [[S_LSHL_B32_7:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY14]], 3, implicit-def dead $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.268, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.267, addrspace 4) ; CHECK-NEXT: [[S_ASHR_I32_8:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_7]], 31, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_16:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM6]], -469, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 
[[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.308, align 8, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.307, align 8, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN21:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN22:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM23]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM23]] @@ -221,13 +221,13 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ADD_I32_22:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM7]], -473, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.326, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.325, addrspace 4) ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, 
[[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.332, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.331, addrspace 4) ; CHECK-NEXT: undef [[S_ADD_U32_24:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_24:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.338, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.337, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN23:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM24]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN24:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM25]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN25:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM26]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) diff --git a/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll index a094631267e64..e9e4d5ebed41c 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll @@ -1,17 +1,35 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | 
FileCheck -check-prefixes=GCN,GFX9 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GFX8,GFX8-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG-TRUE16 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG-FAKE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck 
-check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s + ; FIXME: promotion not handled without f16 insts define half @v_constained_fadd_f16_fpexcept_strict(half %x, half %y) #0 { -; GCN-LABEL: v_constained_fadd_f16_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f16_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_constained_fadd_f16_fpexcept_strict: ; GFX10: ; %bb.0: @@ -19,27 +37,55 @@ define half @v_constained_fadd_f16_fpexcept_strict(half %x, half %y) #0 { ; GFX10-NEXT: v_add_f16_e32 v0, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_strict: -; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-SDAG-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_strict: -; GFX11-FAKE16: ; %bb.0: -; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 -; 
GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-SDAG-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret half %val } define half @v_constained_fadd_f16_fpexcept_ignore(half %x, half %y) #0 { -; GCN-LABEL: v_constained_fadd_f16_fpexcept_ignore: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f16_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-NEXT: s_setpc_b64 
s[30:31] ; ; GFX10-LABEL: v_constained_fadd_f16_fpexcept_ignore: ; GFX10: ; %bb.0: @@ -47,12 +93,44 @@ define half @v_constained_fadd_f16_fpexcept_ignore(half %x, half %y) #0 { ; GFX10-NEXT: v_add_f16_e32 v0, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-SDAG-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f16_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_ignore: ; GFX11-FAKE16: ; 
%bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -63,11 +141,17 @@ define half @v_constained_fadd_f16_fpexcept_ignore(half %x, half %y) #0 { } define half @v_constained_fadd_f16_fpexcept_maytrap(half %x, half %y) #0 { -; GCN-LABEL: v_constained_fadd_f16_fpexcept_maytrap: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f16_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_constained_fadd_f16_fpexcept_maytrap: ; GFX10: ; %bb.0: @@ -75,12 +159,44 @@ define half @v_constained_fadd_f16_fpexcept_maytrap(half %x, half %y) #0 { ; GFX10-NEXT: v_add_f16_e32 v0, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-SDAG-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; 
GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f16_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-FAKE16-LABEL: v_constained_fadd_f16_fpexcept_maytrap: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -97,19 +213,43 @@ define <2 x half> @v_constained_fadd_v2f16_fpexcept_strict(<2 x half> %x, <2 x h ; GFX9-NEXT: v_pk_add_f16 v0, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_strict: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: v_constained_fadd_v2f16_fpexcept_strict: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: v_constained_fadd_v2f16_fpexcept_strict: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: 
v_add_f16_e32 v2, v0, v1 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_constained_fadd_v2f16_fpexcept_strict: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fadd_v2f16_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX10PLUS-LABEL: v_constained_fadd_v2f16_fpexcept_strict: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10PLUS-NEXT: v_pk_add_f16 v0, v0, v1 -; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; GFX12-LABEL: v_constained_fadd_v2f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x half> %val } @@ -121,19 +261,43 @@ define <2 x half> @v_constained_fadd_v2f16_fpexcept_ignore(<2 x half> %x, <2 x h ; GFX9-NEXT: v_pk_add_f16 v0, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_add_f16_e32 v2, v0, v1 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX10PLUS-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10PLUS-NEXT: v_pk_add_f16 v0, v0, v1 -; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_v2f16_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x half> %val } @@ -145,54 +309,142 @@ define <2 x half> @v_constained_fadd_v2f16_fpexcept_maytrap(<2 x half> %x, <2 x ; GFX9-NEXT: v_pk_add_f16 v0, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; 
GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v0, v0, v1 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, v0, v1 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_add_f16_e32 v2, v0, v1 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] ; -; GFX10PLUS-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10PLUS-NEXT: v_pk_add_f16 v0, v0, v1 -; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; GFX10-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; 
GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_add_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x half> %val } define <3 x half> @v_constained_fadd_v3f16_fpexcept_strict(<3 x half> %x, <3 x half> %y) #0 { -; GFX9-LABEL: v_constained_fadd_v3f16_fpexcept_strict: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_pk_add_f16 v0, v0, v2 -; GFX9-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX9-SDAG-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX9-SDAG: ; %bb.0: +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX9-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: v_constained_fadd_v3f16_fpexcept_strict: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX8-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX9-GISEL-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX9-GISEL: ; %bb.0: +; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX9-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] ; -; GFX10-LABEL: v_constained_fadd_v3f16_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_pk_add_f16 v0, v0, v2 -; GFX10-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX10-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_sdwa v4, v0, v2 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_add_f16_e32 v4, v0, v2 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX10-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] ; +; GFX10-GISEL-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX10-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-TRUE16-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v3.l +; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-FAKE16-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-FAKE16-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX11-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX12-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fadd_v3f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX12-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-LABEL: v_constained_fadd_v3f16_fpexcept_strict: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, v0, v2 ; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v3.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-FAKE16-LABEL: v_constained_fadd_v3f16_fpexcept_strict: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -205,40 +457,127 @@ define <3 x half> @v_constained_fadd_v3f16_fpexcept_strict(<3 x half> %x, <3 x h ; FIXME: Scalarized define <4 x half> @v_constained_fadd_v4f16_fpexcept_strict(<4 x half> %x, <4 x half> %y) #0 { -; GFX9-LABEL: v_constained_fadd_v4f16_fpexcept_strict: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX9-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:WORD_1 -; GFX9-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX9-NEXT: v_add_f16_e32 v0, v0, v2 -; GFX9-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-NEXT: v_perm_b32 v0, v5, v0, s4 -; GFX9-NEXT: v_perm_b32 v1, v4, v1, s4 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX9-SDAG-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX9-SDAG: ; %bb.0: +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX9-SDAG-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX9-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX9-SDAG-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX9-SDAG-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-SDAG-NEXT: v_perm_b32 v0, v5, v0, s4 +; GFX9-SDAG-NEXT: v_perm_b32 v1, v4, v1, s4 +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: v_constained_fadd_v4f16_fpexcept_strict: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX8-NEXT: v_add_f16_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX9-GISEL-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX9-GISEL: ; %bb.0: +; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX9-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] ; -; GFX10-LABEL: v_constained_fadd_v4f16_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX10-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX10-NEXT: v_add_f16_e32 v0, v0, v2 -; GFX10-NEXT: v_add_f16_e32 v1, v1, v3 -; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX10-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX10-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v5 +; GFX8-SDAG-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_add_f16_e32 v4, v0, v2 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_add_f16_e32 v2, v1, v3 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX8-GISEL-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_add_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX10-SDAG-NEXT: v_add_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX10-SDAG-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX10-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX10-SDAG-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX10-SDAG-NEXT: v_perm_b32 
v1, v4, v1, 0x5040100 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] ; +; GFX10-GISEL-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX10-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-TRUE16-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v1.h, v1.h, v3.h +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v0.h, v0.h, v2.h +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v2.l +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v3.l +; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-FAKE16-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v2, v6, v5 +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e32 v3, v7, v4 +; GFX11-SDAG-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-SDAG-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 +; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX11-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; 
GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-SDAG-NEXT: v_add_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: v_add_f16_e32 v0, v0, v2 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_add_f16_e32 v2, v6, v5 +; GFX12-SDAG-NEXT: v_add_f16_e32 v3, v7, v4 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fadd_v4f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v2 +; GFX12-GISEL-NEXT: v_pk_add_f16 v1, v1, v3 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-LABEL: v_constained_fadd_v4f16_fpexcept_strict: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -247,7 +586,6 @@ define <4 x half> @v_constained_fadd_v4f16_fpexcept_strict(<4 x half> %x, <4 x h ; GFX11-TRUE16-NEXT: v_add_f16_e32 v0.l, v0.l, v2.l ; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v3.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-FAKE16-LABEL: v_constained_fadd_v4f16_fpexcept_strict: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -267,22 +605,53 @@ define <4 x half> @v_constained_fadd_v4f16_fpexcept_strict(<4 x half> %x, <4 x h } define amdgpu_ps half 
@s_constained_fadd_f16_fpexcept_strict(half inreg %x, half inreg %y) #0 { -; GCN-LABEL: s_constained_fadd_f16_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: v_mov_b32_e32 v0, s3 -; GCN-NEXT: v_add_f16_e32 v0, s2, v0 -; GCN-NEXT: ; return to shader part epilog +; GFX9-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_add_f16_e32 v0, s2, v0 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX8-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX8: ; %bb.0: +; GFX8-NEXT: v_mov_b32_e32 v0, s3 +; GFX8-NEXT: v_add_f16_e32 v0, s2, v0 +; GFX8-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: s_constained_fadd_f16_fpexcept_strict: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_f16_e64 v0, s2, s3 ; GFX10-NEXT: ; return to shader part epilog ; +; GFX11-SDAG-TRUE16-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX11-SDAG-TRUE16: ; %bb.0: +; GFX11-SDAG-TRUE16-NEXT: v_add_f16_e64 v0.l, s2, s3 +; GFX11-SDAG-TRUE16-NEXT: ; return to shader part epilog +; +; GFX11-SDAG-FAKE16-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX11-SDAG-FAKE16: ; %bb.0: +; GFX11-SDAG-FAKE16-NEXT: v_add_f16_e64 v0, s2, s3 +; GFX11-SDAG-FAKE16-NEXT: ; return to shader part epilog +; +; GFX11-GISEL-TRUE16-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: v_add_f16_e64 v0.l, s2, s3 +; GFX11-GISEL-TRUE16-NEXT: ; return to shader part epilog +; +; GFX11-GISEL-FAKE16-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: v_add_f16_e64 v0, s2, s3 +; GFX11-GISEL-FAKE16-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fadd_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_add_f16 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog ; GFX11-TRUE16-LABEL: s_constained_fadd_f16_fpexcept_strict: ; GFX11-TRUE16: ; %bb.0: ; 
GFX11-TRUE16-NEXT: v_add_f16_e64 v0.l, s2, s3 ; GFX11-TRUE16-NEXT: ; return to shader part epilog -; ; GFX11-FAKE16-LABEL: s_constained_fadd_f16_fpexcept_strict: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: v_add_f16_e64 v0, s2, s3 @@ -298,22 +667,44 @@ define amdgpu_ps <2 x half> @s_constained_fadd_v2f16_fpexcept_strict(<2 x half> ; GFX9-NEXT: v_pk_add_f16 v0, s2, v0 ; GFX9-NEXT: ; return to shader part epilog ; -; GFX8-LABEL: s_constained_fadd_v2f16_fpexcept_strict: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_lshr_b32 s0, s3, 16 -; GFX8-NEXT: s_lshr_b32 s1, s2, 16 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: v_mov_b32_e32 v1, s1 -; GFX8-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_mov_b32_e32 v1, s3 -; GFX8-NEXT: v_add_f16_e32 v1, s2, v1 -; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX8-NEXT: ; return to shader part epilog +; GFX8-SDAG-LABEL: s_constained_fadd_v2f16_fpexcept_strict: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_lshr_b32 s0, s3, 16 +; GFX8-SDAG-NEXT: s_lshr_b32 s1, s2, 16 +; GFX8-SDAG-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-SDAG-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-SDAG-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-SDAG-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-SDAG-NEXT: v_add_f16_e32 v1, s2, v1 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v1, v0 +; GFX8-SDAG-NEXT: ; return to shader part epilog +; +; GFX8-GISEL-LABEL: s_constained_fadd_v2f16_fpexcept_strict: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_lshr_b32 s0, s2, 16 +; GFX8-GISEL-NEXT: s_lshr_b32 s1, s3, 16 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v0, s3 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX8-GISEL-NEXT: v_add_f16_e32 v0, s2, v0 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX8-GISEL-NEXT: ; return to shader part epilog +; +; 
GFX10-LABEL: s_constained_fadd_v2f16_fpexcept_strict: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_pk_add_f16 v0, s2, s3 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: s_constained_fadd_v2f16_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_pk_add_f16 v0, s2, s3 +; GFX11-NEXT: ; return to shader part epilog ; -; GFX10PLUS-LABEL: s_constained_fadd_v2f16_fpexcept_strict: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: v_pk_add_f16 v0, s2, s3 -; GFX10PLUS-NEXT: ; return to shader part epilog +; GFX12-LABEL: s_constained_fadd_v2f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_pk_add_f16 v0, s2, s3 +; GFX12-NEXT: ; return to shader part epilog %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x half> %val } @@ -325,5 +716,3 @@ declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x ha attributes #0 = { strictfp } attributes #1 = { inaccessiblememonly nounwind willreturn } -;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: -; GFX11: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll b/llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll index 2aecf5fd8753c..a039c2629c395 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll @@ -1,137 +1,383 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: 
v_add_f32_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f32_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_strict: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_add_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_ignore: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f32_fpexcept_ignore: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_ignore: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_add_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_ignore: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_ignore: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret float %val } define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_maytrap: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f32_fpexcept_maytrap: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_maytrap: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_add_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: 
v_add_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_maytrap: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_maytrap: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v1 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret float %val } define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y) #0 { -; GCN-LABEL: v_constained_fadd_v2f32_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v2 -; GCN-NEXT: v_add_f32_e32 v1, v1, v3 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX9-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: v_dual_add_f32 v0, v0, v2 :: 
v_dual_add_f32 v1, v1, v3 +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] ; +; GFX12-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX10-LABEL: v_constained_fadd_v2f32_fpexcept_strict: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f32_e32 v0, v0, v2 ; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-LABEL: v_constained_fadd_v2f32_fpexcept_strict: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_strict: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x float> %val } define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x float> %y) #0 { -; GCN-LABEL: 
v_constained_fadd_v2f32_fpexcept_ignore: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v2 -; GCN-NEXT: v_add_f32_e32 v1, v1, v3 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX9-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] ; +; GFX12-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX10-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: ; GFX10: ; %bb.0: ; 
GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f32_e32 v0, v0, v2 ; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_ignore: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %val } define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2 x float> %y) #0 { -; GCN-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v2 -; GCN-NEXT: v_add_f32_e32 v1, v1, v3 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX9-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: 
v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX10-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] ; +; GFX12-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX10-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f32_e32 v0, v0, v2 ; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_add_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-SDAG-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap: +; GCN-GISEL: ; %bb.0: +; 
GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v2 +; GCN-GISEL-NEXT: v_add_f32_e32 v1, v1, v3 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x float> %val } define <3 x float> @v_constained_fadd_v3f32_fpexcept_strict(<3 x float> %x, <3 x float> %y) #0 { -; GCN-LABEL: v_constained_fadd_v3f32_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e32 v0, v0, v3 -; GCN-NEXT: v_add_f32_e32 v1, v1, v4 -; GCN-NEXT: v_add_f32_e32 v2, v2, v5 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e32 v0, v0, v3 +; GFX9-NEXT: v_add_f32_e32 v1, v1, v4 +; GFX9-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_add_f32_e32 v0, v0, v3 +; GFX10-SDAG-NEXT: v_add_f32_e32 v1, v1, v4 +; GFX10-SDAG-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_add_f32_e32 v0, v0, v3 +; GFX10-GISEL-NEXT: v_add_f32_e32 v1, v1, v4 +; GFX10-GISEL-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: v_dual_add_f32 v0, v0, v3 :: v_dual_add_f32 v1, v1, v4 +; GFX11-SDAG-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; 
GFX11-GISEL-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_dual_add_f32 v0, v0, v3 :: v_dual_add_f32 v1, v1, v4 +; GFX11-GISEL-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] ; +; GFX12-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_add_f32 v0, v0, v3 :: v_dual_add_f32 v1, v1, v4 +; GFX12-NEXT: v_add_f32_e32 v2, v2, v5 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX10-LABEL: v_constained_fadd_v3f32_fpexcept_strict: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -139,78 +385,204 @@ define <3 x float> @v_constained_fadd_v3f32_fpexcept_strict(<3 x float> %x, <3 x ; GFX10-NEXT: v_add_f32_e32 v1, v1, v4 ; GFX10-NEXT: v_add_f32_e32 v2, v2, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] -; ; GFX11-LABEL: v_constained_fadd_v3f32_fpexcept_strict: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_add_f32 v0, v0, v3 :: v_dual_add_f32 v1, v1, v4 ; GFX11-NEXT: v_add_f32_e32 v2, v2, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e32 v0, v0, v3 +; GCN-SDAG-NEXT: v_add_f32_e32 v1, v1, v4 +; GCN-SDAG-NEXT: v_add_f32_e32 v2, v2, v5 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_v3f32_fpexcept_strict: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e32 v0, v0, v3 +; GCN-GISEL-NEXT: v_add_f32_e32 v1, v1, v4 +; GCN-GISEL-NEXT: v_add_f32_e32 v2, v2, v5 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %val = call <3 x float> 
@llvm.experimental.constrained.fadd.v3f32(<3 x float> %x, <3 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x float> %val } define amdgpu_ps float @s_constained_fadd_f32_fpexcept_strict(float inreg %x, float inreg %y) #0 { -; GCN-LABEL: s_constained_fadd_f32_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: v_mov_b32_e32 v0, s3 -; GCN-NEXT: v_add_f32_e32 v0, s2, v0 -; GCN-NEXT: ; return to shader part epilog +; GFX9-LABEL: s_constained_fadd_f32_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_add_f32_e32 v0, s2, v0 +; GFX9-NEXT: ; return to shader part epilog ; ; GFX10PLUS-LABEL: s_constained_fadd_f32_fpexcept_strict: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: v_add_f32_e64 v0, s2, s3 ; GFX10PLUS-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fadd_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_add_f32 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog +; GCN-SDAG-LABEL: s_constained_fadd_f32_fpexcept_strict: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: v_mov_b32_e32 v0, s3 +; GCN-SDAG-NEXT: v_add_f32_e32 v0, s2, v0 +; GCN-SDAG-NEXT: ; return to shader part epilog +; GCN-GISEL-LABEL: s_constained_fadd_f32_fpexcept_strict: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: v_mov_b32_e32 v0, s3 +; GCN-GISEL-NEXT: v_add_f32_e32 v0, s2, v0 +; GCN-GISEL-NEXT: ; return to shader part epilog %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } define float @v_constained_fadd_f32_fpexcept_strict_fabs_lhs(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e64 v0, |v0|, v1 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: +; GFX9: ; 
%bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e64 v0, |v0|, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_add_f32_e64 v0, |v0|, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f32_e64 v0, |v0|, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e64 v0, |v0|, v1 +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e64 v0, |v0|, v1 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %val = call float @llvm.experimental.constrained.fadd.f32(float %fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } define float @v_constained_fadd_f32_fpexcept_strict_fabs_rhs(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_add_f32_e64 v0, v0, |v1| -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_add_f32_e64 v0, v0, |v1| +; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_add_f32_e64 v0, v0, |v1| ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_f32_e64 v0, v0, |v1| +; GFX12-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_add_f32_e64 v0, v0, |v1| +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e64 v0, v0, |v1| +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] %fabs.y = call float @llvm.fabs.f32(float %y) #0 %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %fabs.y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } define float @v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs(float %x, float %y) #0 { -; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_sub_f32_e64 v0, v1, |v0| -; GCN-NEXT: s_setpc_b64 s[30:31] +; GCN-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-SDAG-NEXT: v_sub_f32_e64 v0, v1, |v0| +; GCN-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX10PLUS-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10PLUS-NEXT: v_sub_f32_e64 v0, v1, |v0| -; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; GCN-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; 
GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-GISEL-NEXT: v_add_f32_e64 v0, -|v0|, v1 +; GCN-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX9-SDAG: ; %bb.0: +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: v_sub_f32_e64 v0, v1, |v0| +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX9-GISEL: ; %bb.0: +; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_add_f32_e64 v0, -|v0|, v1 +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX10-SDAG: ; %bb.0: +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_sub_f32_e64 v0, v1, |v0| +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX10-GISEL: ; %bb.0: +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_add_f32_e64 v0, -|v0|, v1 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: v_sub_f32_e64 v0, v1, |v0| +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_add_f32_e64 v0, -|v0|, v1 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: 
v_sub_f32_e64 v0, v1, |v0| +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_add_f32_e64 v0, -|v0|, v1 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %neg.fabs.x = fneg float %fabs.x %val = call float @llvm.experimental.constrained.fadd.f32(float %neg.fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") diff --git a/llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll index faa0131c88c2d..5469fc8330971 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll @@ -1,7 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GCN-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GCN-GISEL %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX10 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX10 %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX11 %s +; RUN: llc 
-global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX11 %s define double @v_constained_fadd_f64_fpexcept_strict(double %x, double %y) #0 { ; GCN-LABEL: v_constained_fadd_f64_fpexcept_strict: @@ -9,12 +14,6 @@ define double @v_constained_fadd_f64_fpexcept_strict(double %x, double %y) #0 { ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_f64_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret double %val } @@ -25,12 +24,6 @@ define double @v_constained_fadd_f64_fpexcept_ignore(double %x, double %y) #0 { ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_f64_fpexcept_ignore: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret double %val } @@ -41,12 +34,6 @@ define double @v_constained_fadd_f64_fpexcept_maytrap(double %x, double %y) #0 { ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_f64_fpexcept_maytrap: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double 
%y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret double %val } @@ -58,13 +45,6 @@ define <2 x double> @v_constained_fadd_v2f64_fpexcept_strict(<2 x double> %x, <2 ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] ; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_v2f64_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] -; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x double> %val } @@ -76,13 +56,6 @@ define <2 x double> @v_constained_fadd_v2f64_fpexcept_ignore(<2 x double> %x, <2 ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] ; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_v2f64_fpexcept_ignore: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] -; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %val } @@ -94,13 +67,6 @@ define <2 x double> @v_constained_fadd_v2f64_fpexcept_maytrap(<2 x double> %x, < ; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] ; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_v2f64_fpexcept_maytrap: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] -; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, 
<2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x double> %val } @@ -113,30 +79,29 @@ define <3 x double> @v_constained_fadd_v3f64_fpexcept_strict(<3 x double> %x, <3 ; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[8:9] ; GCN-NEXT: v_add_f64 v[4:5], v[4:5], v[10:11] ; GCN-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-LABEL: v_constained_fadd_v3f64_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7] -; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], v[8:9] -; GFX10-NEXT: v_add_f64 v[4:5], v[4:5], v[10:11] -; GFX10-NEXT: s_setpc_b64 s[30:31] %val = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double> %x, <3 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x double> %val } define amdgpu_ps <2 x float> @s_constained_fadd_f64_fpexcept_strict(double inreg %x, double inreg %y) #0 { -; GCN-LABEL: s_constained_fadd_f64_fpexcept_strict: -; GCN: ; %bb.0: -; GCN-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NEXT: v_mov_b32_e32 v1, s5 -; GCN-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1] -; GCN-NEXT: ; return to shader part epilog +; GCN-SDAG-LABEL: s_constained_fadd_f64_fpexcept_strict: +; GCN-SDAG: ; %bb.0: +; GCN-SDAG-NEXT: v_mov_b32_e32 v0, s4 +; GCN-SDAG-NEXT: v_mov_b32_e32 v1, s5 +; GCN-SDAG-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1] +; GCN-SDAG-NEXT: ; return to shader part epilog +; +; GCN-GISEL-LABEL: s_constained_fadd_f64_fpexcept_strict: +; GCN-GISEL: ; %bb.0: +; GCN-GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GCN-GISEL-NEXT: v_mov_b32_e32 v1, s5 +; GCN-GISEL-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1] +; GCN-GISEL-NEXT: ; return to shader part epilog ; -; GFX10-LABEL: s_constained_fadd_f64_fpexcept_strict: -; GFX10: ; %bb.0: -; GFX10-NEXT: v_add_f64 v[0:1], s[2:3], s[4:5] -; GFX10-NEXT: ; return to shader part epilog +; GFX10PLUS-LABEL: s_constained_fadd_f64_fpexcept_strict: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: v_add_f64 v[0:1], s[2:3], s[4:5] +; 
GFX10PLUS-NEXT: ; return to shader part epilog %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") %cast = bitcast double %val to <2 x float> ret <2 x float> %cast @@ -148,3 +113,6 @@ declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 attributes #0 = { strictfp } attributes #1 = { inaccessiblememonly nounwind willreturn } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; GFX10: {{.*}} +; GFX11: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll index eed5f016aa787..79154d0db16ec 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll @@ -10,9 +10,11 @@ ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-SDAG,GFX11-SDAG-FAKE16 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX1-GISEL,GFX1-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX1-GISEL,GFX1-GISEL-FAKE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck 
-check-prefixes=GFX10PLUS,GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s ; FIXME: promotion not handled without f16 insts @@ -41,12 +43,32 @@ define half @v_constained_fmul_f16_fpexcept_strict(half %x, half %y) #0 { ; GFX11-SDAG-FAKE16-NEXT: v_mul_f16_e32 v0, v0, v1 ; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_strict: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_strict: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX1-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_strict: ; GFX1-GISEL-TRUE16: ; %bb.0: ; GFX1-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX1-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l ; GFX1-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX1-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_strict: ; GFX1-GISEL-FAKE16: ; %bb.0: ; GFX1-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -81,12 +103,32 @@ define half @v_constained_fmul_f16_fpexcept_ignore(half %x, half %y) #0 { ; GFX11-SDAG-FAKE16-NEXT: v_mul_f16_e32 v0, 
v0, v1 ; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_ignore: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_ignore: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f16_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX1-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_ignore: ; GFX1-GISEL-TRUE16: ; %bb.0: ; GFX1-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX1-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l ; GFX1-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX1-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_ignore: ; GFX1-GISEL-FAKE16: ; %bb.0: ; GFX1-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -121,12 +163,32 @@ define half @v_constained_fmul_f16_fpexcept_maytrap(half %x, half %y) #0 { ; GFX11-SDAG-FAKE16-NEXT: v_mul_f16_e32 v0, v0, v1 ; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_maytrap: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_maytrap: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) +; GFX11-GISEL-FAKE16-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f16_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] ; GFX1-GISEL-TRUE16-LABEL: v_constained_fmul_f16_fpexcept_maytrap: ; GFX1-GISEL-TRUE16: ; %bb.0: ; GFX1-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX1-GISEL-TRUE16-NEXT: v_mul_f16_e32 v0.l, v0.l, v1.l ; GFX1-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] -; ; GFX1-GISEL-FAKE16-LABEL: v_constained_fmul_f16_fpexcept_maytrap: ; GFX1-GISEL-FAKE16: ; %bb.0: ; GFX1-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -164,6 +226,16 @@ define <2 x half> @v_constained_fmul_v2f16_fpexcept_strict(<2 x half> %x, <2 x h ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_pk_mul_f16 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v2f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_mul_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x half> %val } @@ -196,6 +268,16 @@ define <2 x half> @v_constained_fmul_v2f16_fpexcept_ignore(<2 x half> %x, <2 x h ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_pk_mul_f16 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v2f16_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: 
s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_mul_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x half> %val } @@ -228,6 +310,16 @@ define <2 x half> @v_constained_fmul_v2f16_fpexcept_maytrap(<2 x half> %x, <2 x ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_pk_mul_f16 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v2f16_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_mul_f16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x half> %val } @@ -293,6 +385,34 @@ define <3 x half> @v_constained_fmul_v3f16_fpexcept_strict(<3 x half> %x, <3 x h ; GFX11-SDAG-FAKE16-NEXT: v_mul_f16_e32 v1, v1, v3 ; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-GISEL-LABEL: v_constained_fmul_v3f16_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_pk_mul_f16 v0, v0, v2 +; GFX11-GISEL-NEXT: v_pk_mul_f16 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fmul_v3f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_pk_mul_f16 v0, v0, v2 +; GFX12-SDAG-NEXT: v_mul_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: 
s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fmul_v3f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_mul_f16 v0, v0, v2 +; GFX12-GISEL-NEXT: v_pk_mul_f16 v1, v1, v3 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX1-GISEL-LABEL: v_constained_fmul_v3f16_fpexcept_strict: ; GFX1-GISEL: ; %bb.0: ; GFX1-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -388,6 +508,44 @@ define <4 x half> @v_constained_fmul_v4f16_fpexcept_strict(<4 x half> %x, <4 x h ; GFX11-SDAG-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 ; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] ; +; GFX11-GISEL-LABEL: v_constained_fmul_v4f16_fpexcept_strict: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-GISEL-NEXT: v_pk_mul_f16 v0, v0, v2 +; GFX11-GISEL-NEXT: v_pk_mul_f16 v1, v1, v3 +; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fmul_v4f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-SDAG-NEXT: v_mul_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: v_mul_f16_e32 v0, v0, v2 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_mul_f16_e32 v2, v6, v5 +; GFX12-SDAG-NEXT: v_mul_f16_e32 v3, v7, v4 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; 
GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fmul_v4f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_mul_f16 v0, v0, v2 +; GFX12-GISEL-NEXT: v_pk_mul_f16 v1, v1, v3 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX1-GISEL-LABEL: v_constained_fmul_v4f16_fpexcept_strict: ; GFX1-GISEL: ; %bb.0: ; GFX1-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -420,11 +578,26 @@ define amdgpu_ps half @s_constained_fmul_f16_fpexcept_strict(half inreg %x, half ; GFX11-SDAG-FAKE16-NEXT: v_mul_f16_e64 v0, s2, s3 ; GFX11-SDAG-FAKE16-NEXT: ; return to shader part epilog ; +; GFX11-GISEL-TRUE16-LABEL: s_constained_fmul_f16_fpexcept_strict: +; GFX11-GISEL-TRUE16: ; %bb.0: +; GFX11-GISEL-TRUE16-NEXT: v_mul_f16_e64 v0.l, s2, s3 +; GFX11-GISEL-TRUE16-NEXT: ; return to shader part epilog +; +; GFX11-GISEL-FAKE16-LABEL: s_constained_fmul_f16_fpexcept_strict: +; GFX11-GISEL-FAKE16: ; %bb.0: +; GFX11-GISEL-FAKE16-NEXT: v_mul_f16_e64 v0, s2, s3 +; GFX11-GISEL-FAKE16-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fmul_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_mul_f16 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog ; GFX1-GISEL-TRUE16-LABEL: s_constained_fmul_f16_fpexcept_strict: ; GFX1-GISEL-TRUE16: ; %bb.0: ; GFX1-GISEL-TRUE16-NEXT: v_mul_f16_e64 v0.l, s2, s3 ; GFX1-GISEL-TRUE16-NEXT: ; return to shader part epilog -; ; GFX1-GISEL-FAKE16-LABEL: s_constained_fmul_f16_fpexcept_strict: ; GFX1-GISEL-FAKE16: ; %bb.0: ; GFX1-GISEL-FAKE16-NEXT: v_mul_f16_e64 v0, s2, s3 @@ -468,6 +641,11 @@ define amdgpu_ps <2 x half> @s_constained_fmul_v2f16_fpexcept_strict(<2 x 
half> ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: v_pk_mul_f16 v0, s2, s3 ; GFX10PLUS-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fmul_v2f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_pk_mul_f16 v0, s2, s3 +; GFX12-NEXT: ; return to shader part epilog %val = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x half> %val } diff --git a/llvm/test/CodeGen/AMDGPU/strict_fmul.f32.ll b/llvm/test/CodeGen/AMDGPU/strict_fmul.f32.ll index 8df2834928395..4c1df046a6684 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fmul.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fmul.f32.ll @@ -1,11 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s + ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s + ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s + define float @v_constained_fmul_f32_fpexcept_strict(float %x, float %y) #0 { ; GCN-LABEL: v_constained_fmul_f32_fpexcept_strict: ; GCN: ; %bb.0: @@ -18,6 +23,16 @@ define float 
@v_constained_fmul_f32_fpexcept_strict(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } @@ -34,6 +49,16 @@ define float @v_constained_fmul_f32_fpexcept_ignore(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret float %val } @@ -50,6 +75,16 @@ define float @v_constained_fmul_f32_fpexcept_maytrap(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float 
@llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret float %val } @@ -74,6 +109,16 @@ define <2 x float> @v_constained_fmul_v2f32_fpexcept_strict(<2 x float> %x, <2 x ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v2f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x float> %val } @@ -98,6 +143,16 @@ define <2 x float> @v_constained_fmul_v2f32_fpexcept_ignore(<2 x float> %x, <2 x ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v2f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %val } @@ -122,6 +177,16 @@ define <2 x float> @v_constained_fmul_v2f32_fpexcept_maytrap(<2 x float> %x, <2 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; 
GFX12-LABEL: v_constained_fmul_v2f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x float> %val } @@ -149,6 +214,17 @@ define <3 x float> @v_constained_fmul_v3f32_fpexcept_strict(<3 x float> %x, <3 x ; GFX11-NEXT: v_dual_mul_f32 v0, v0, v3 :: v_dual_mul_f32 v1, v1, v4 ; GFX11-NEXT: v_mul_f32_e32 v2, v2, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_v3f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_mul_f32 v0, v0, v3 :: v_dual_mul_f32 v1, v1, v4 +; GFX12-NEXT: v_mul_f32_e32 v2, v2, v5 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(<3 x float> %x, <3 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x float> %val } @@ -164,6 +240,13 @@ define amdgpu_ps float @s_constained_fmul_f32_fpexcept_strict(float inreg %x, fl ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: v_mul_f32_e64 v0, s2, s3 ; GFX10PLUS-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fmul_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_mul_f32 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } @@ -180,6 +263,16 @@ define 
float @v_constained_fmul_f32_fpexcept_strict_fabs_lhs(float %x, float %y) ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e64 v0, |v0|, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_strict_fabs_lhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e64 v0, |v0|, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %val = call float @llvm.experimental.constrained.fmul.f32(float %fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val @@ -197,6 +290,16 @@ define float @v_constained_fmul_f32_fpexcept_strict_fabs_rhs(float %x, float %y) ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e64 v0, v0, |v1| ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_strict_fabs_rhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e64 v0, v0, |v1| +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.y = call float @llvm.fabs.f32(float %y) #0 %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %fabs.y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val @@ -214,6 +317,16 @@ define float @v_constained_fmul_f32_fpexcept_strict_fneg_fabs_lhs(float %x, floa ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_mul_f32_e64 v0, -|v0|, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fmul_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; 
GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mul_f32_e64 v0, -|v0|, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %neg.fabs.x = fneg float %fabs.x %val = call float @llvm.experimental.constrained.fmul.f32(float %neg.fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") @@ -226,3 +339,6 @@ declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x declare <3 x float> @llvm.experimental.constrained.fmul.v3f32(<3 x float>, <3 x float>, metadata, metadata) attributes #0 = { strictfp } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; GFX12-GISEL: {{.*}} +; GFX12-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/strict_fmul.f64.ll b/llvm/test/CodeGen/AMDGPU/strict_fmul.f64.ll index 8c98a662c59cc..4d2a93397e0c3 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fmul.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fmul.f64.ll @@ -1,10 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s + ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX10 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX10 %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 
-amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s define double @v_constained_fmul_f64_fpexcept_strict(double %x, double %y) #0 { ; GCN-LABEL: v_constained_fmul_f64_fpexcept_strict: @@ -18,6 +20,12 @@ define double @v_constained_fmul_f64_fpexcept_strict(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret double %val } @@ -34,6 +42,12 @@ define double @v_constained_fmul_f64_fpexcept_ignore(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_f64_fpexcept_ignore: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret double %val } @@ -50,6 +64,12 @@ define double @v_constained_fmul_f64_fpexcept_maytrap(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_f64_fpexcept_maytrap: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata 
!"fpexcept.maytrap") ret double %val } @@ -68,6 +88,13 @@ define <2 x double> @v_constained_fmul_v2f64_fpexcept_strict(<2 x double> %x, <2 ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] ; GFX10-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_v2f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] +; GFX11-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x double> %val } @@ -86,6 +113,13 @@ define <2 x double> @v_constained_fmul_v2f64_fpexcept_ignore(<2 x double> %x, <2 ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] ; GFX10-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_v2f64_fpexcept_ignore: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] +; GFX11-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %val } @@ -104,6 +138,13 @@ define <2 x double> @v_constained_fmul_v2f64_fpexcept_maytrap(<2 x double> %x, < ; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] ; GFX10-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_v2f64_fpexcept_maytrap: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5] +; GFX11-NEXT: v_mul_f64 v[2:3], v[2:3], v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, 
metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x double> %val } @@ -124,6 +165,14 @@ define <3 x double> @v_constained_fmul_v3f64_fpexcept_strict(<3 x double> %x, <3 ; GFX10-NEXT: v_mul_f64 v[2:3], v[2:3], v[8:9] ; GFX10-NEXT: v_mul_f64 v[4:5], v[4:5], v[10:11] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fmul_v3f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7] +; GFX11-NEXT: v_mul_f64 v[2:3], v[2:3], v[8:9] +; GFX11-NEXT: v_mul_f64 v[4:5], v[4:5], v[10:11] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(<3 x double> %x, <3 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x double> %val } @@ -140,6 +189,11 @@ define amdgpu_ps <2 x float> @s_constained_fmul_f64_fpexcept_strict(double inreg ; GFX10: ; %bb.0: ; GFX10-NEXT: v_mul_f64 v[0:1], s[2:3], s[4:5] ; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: s_constained_fmul_f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mul_f64 v[0:1], s[2:3], s[4:5] +; GFX11-NEXT: ; return to shader part epilog %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") %cast = bitcast double %val to <2 x float> ret <2 x float> %cast diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll index 6daea572f58c6..45cc77486b509 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll @@ -5,13 +5,16 @@ ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8,GFX8-SDAG %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8,GFX8-GISEL %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck 
-check-prefixes=GFX10-SDAG %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11-SDAG-TRUE16 %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11-SDAG-FAKE16 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-GISEL-FAKE16 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG-TRUE16 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG-FAKE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 
-mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s ; FIXME: promotion not handled without f16 insts @@ -22,17 +25,11 @@ define half @v_constained_fsub_f16_fpexcept_strict(half %x, half %y) #0 { ; GCN-NEXT: v_sub_f16_e32 v0, v0, v1 ; GCN-NEXT: s_setpc_b64 s[30:31] ; -; GFX10-SDAG-LABEL: v_constained_fsub_f16_fpexcept_strict: -; GFX10-SDAG: ; %bb.0: -; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-GISEL-LABEL: v_constained_fsub_f16_fpexcept_strict: -; GFX10-GISEL: ; %bb.0: -; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-GISEL-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; GFX10-LABEL: v_constained_fsub_f16_fpexcept_strict: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-SDAG-TRUE16-LABEL: v_constained_fsub_f16_fpexcept_strict: ; GFX11-SDAG-TRUE16: ; %bb.0: @@ -57,6 +54,16 @@ define half @v_constained_fsub_f16_fpexcept_strict(half %x, half %y) #0 { ; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-FAKE16-NEXT: v_sub_f16_e32 v0, v0, v1 ; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret half %val } @@ -68,17 +75,11 @@ define half @v_constained_fsub_f16_fpexcept_ignore(half %x, half %y) #0 { ; GCN-NEXT: v_sub_f16_e32 v0, v0, v1 ; GCN-NEXT: s_setpc_b64 s[30:31] ; -; 
GFX10-SDAG-LABEL: v_constained_fsub_f16_fpexcept_ignore: -; GFX10-SDAG: ; %bb.0: -; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-GISEL-LABEL: v_constained_fsub_f16_fpexcept_ignore: -; GFX10-GISEL: ; %bb.0: -; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-GISEL-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; GFX10-LABEL: v_constained_fsub_f16_fpexcept_ignore: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-SDAG-TRUE16-LABEL: v_constained_fsub_f16_fpexcept_ignore: ; GFX11-SDAG-TRUE16: ; %bb.0: @@ -103,6 +104,16 @@ define half @v_constained_fsub_f16_fpexcept_ignore(half %x, half %y) #0 { ; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-FAKE16-NEXT: v_sub_f16_e32 v0, v0, v1 ; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f16_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret half %val } @@ -114,17 +125,11 @@ define half @v_constained_fsub_f16_fpexcept_maytrap(half %x, half %y) #0 { ; GCN-NEXT: v_sub_f16_e32 v0, v0, v1 ; GCN-NEXT: s_setpc_b64 s[30:31] ; -; GFX10-SDAG-LABEL: v_constained_fsub_f16_fpexcept_maytrap: -; GFX10-SDAG: ; %bb.0: -; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] -; -; GFX10-GISEL-LABEL: v_constained_fsub_f16_fpexcept_maytrap: -; GFX10-GISEL: ; %bb.0: -; 
GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-GISEL-NEXT: v_sub_f16_e32 v0, v0, v1 -; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] +; GFX10-LABEL: v_constained_fsub_f16_fpexcept_maytrap: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-SDAG-TRUE16-LABEL: v_constained_fsub_f16_fpexcept_maytrap: ; GFX11-SDAG-TRUE16: ; %bb.0: @@ -149,6 +154,16 @@ define half @v_constained_fsub_f16_fpexcept_maytrap(half %x, half %y) #0 { ; GFX11-GISEL-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-FAKE16-NEXT: v_sub_f16_e32 v0, v0, v1 ; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f16_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret half %val } @@ -222,6 +237,31 @@ define <2 x half> @v_constained_fsub_v2f16_fpexcept_strict(<2 x half> %x, <2 x h ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; 
GFX12-SDAG-NEXT: v_sub_f16_e32 v2, v3, v2 +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fsub_v2f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX10PLUS-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_strict: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -309,6 +349,31 @@ define <2 x half> @v_constained_fsub_v2f16_fpexcept_ignore(<2 x half> %x, <2 x h ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_ignore: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_sub_f16_e32 v2, v3, v2 +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fsub_v2f16_fpexcept_ignore: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] 
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX10PLUS-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_ignore: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -396,6 +461,31 @@ define <2 x half> @v_constained_fsub_v2f16_fpexcept_maytrap(<2 x half> %x, <2 x ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_maytrap: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v0, v0, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_sub_f16_e32 v2, v3, v2 +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fsub_v2f16_fpexcept_maytrap: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1] +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX10PLUS-SDAG-LABEL: v_constained_fsub_v2f16_fpexcept_maytrap: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -509,6 +599,40 @@ define <3 x half> @v_constained_fsub_v3f16_fpexcept_strict(<3 x half> %x, <3 x h ; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX11-GISEL-FAKE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0 ; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: 
v_constained_fsub_v3f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v0, v0, v2 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_sub_f16_e32 v2, v5, v4 +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fsub_v3f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v4, 16, v0 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX12-GISEL-NEXT: v_sub_f16_e32 v0, v0, v2 +; GFX12-GISEL-NEXT: v_sub_f16_e32 v1, v1, v3 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-GISEL-NEXT: v_sub_f16_e32 v2, v4, v5 +; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX10PLUS-SDAG-LABEL: v_constained_fsub_v3f16_fpexcept_strict: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -654,6 +778,51 @@ define <4 x half> @v_constained_fsub_v4f16_fpexcept_strict(<4 x half> %x, <4 x h ; GFX11-GISEL-FAKE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0 ; GFX11-GISEL-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 ; GFX11-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-SDAG-LABEL: v_constained_fsub_v4f16_fpexcept_strict: +; GFX12-SDAG: 
; %bb.0: +; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-SDAG-NEXT: s_wait_expcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0 +; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-SDAG-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v1, v1, v3 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v0, v0, v2 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_sub_f16_e32 v2, v6, v5 +; GFX12-SDAG-NEXT: v_sub_f16_e32 v3, v7, v4 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 +; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-GISEL-LABEL: v_constained_fsub_v4f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-GISEL-NEXT: s_wait_expcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0 +; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v4, 16, v0 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v7, 16, v3 +; GFX12-GISEL-NEXT: v_sub_f16_e32 v0, v0, v2 +; GFX12-GISEL-NEXT: v_sub_f16_e32 v1, v1, v3 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-GISEL-NEXT: v_sub_f16_e32 v2, v4, v6 +; GFX12-GISEL-NEXT: v_sub_f16_e32 v3, v5, v7 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: 
v_lshl_or_b32 v0, v2, 16, v0 +; GFX12-GISEL-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31] ; GFX10PLUS-SDAG-LABEL: v_constained_fsub_v4f16_fpexcept_strict: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -695,15 +864,10 @@ define amdgpu_ps half @s_constained_fsub_f16_fpexcept_strict(half inreg %x, half ; GCN-NEXT: v_sub_f16_e32 v0, s2, v0 ; GCN-NEXT: ; return to shader part epilog ; -; GFX10-SDAG-LABEL: s_constained_fsub_f16_fpexcept_strict: -; GFX10-SDAG: ; %bb.0: -; GFX10-SDAG-NEXT: v_sub_f16_e64 v0, s2, s3 -; GFX10-SDAG-NEXT: ; return to shader part epilog -; -; GFX10-GISEL-LABEL: s_constained_fsub_f16_fpexcept_strict: -; GFX10-GISEL: ; %bb.0: -; GFX10-GISEL-NEXT: v_sub_f16_e64 v0, s2, s3 -; GFX10-GISEL-NEXT: ; return to shader part epilog +; GFX10-LABEL: s_constained_fsub_f16_fpexcept_strict: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_sub_f16_e64 v0, s2, s3 +; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-SDAG-TRUE16-LABEL: s_constained_fsub_f16_fpexcept_strict: ; GFX11-SDAG-TRUE16: ; %bb.0: @@ -724,6 +888,13 @@ define amdgpu_ps half @s_constained_fsub_f16_fpexcept_strict(half inreg %x, half ; GFX11-GISEL-FAKE16: ; %bb.0: ; GFX11-GISEL-FAKE16-NEXT: v_sub_f16_e64 v0, s2, s3 ; GFX11-GISEL-FAKE16-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fsub_f16_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_sub_f16 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret half %val } @@ -808,6 +979,23 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half> ; GFX11-GISEL: ; %bb.0: ; GFX11-GISEL-NEXT: v_pk_add_f16 v0, s2, s3 neg_lo:[0,1] neg_hi:[0,1] ; GFX11-GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: 
s_constained_fsub_v2f16_fpexcept_strict: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_lshr_b32 s0, s3, 16 +; GFX12-SDAG-NEXT: s_lshr_b32 s1, s2, 16 +; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_3) +; GFX12-SDAG-NEXT: s_sub_f16 s0, s1, s0 +; GFX12-SDAG-NEXT: s_sub_f16 s1, s2, s3 +; GFX12-SDAG-NEXT: s_pack_ll_b32_b16 s0, s1, s0 +; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: s_constained_fsub_v2f16_fpexcept_strict: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, s2, s3 neg_lo:[0,1] neg_hi:[0,1] +; GFX12-GISEL-NEXT: ; return to shader part epilog ; GFX10PLUS-SDAG-LABEL: s_constained_fsub_v2f16_fpexcept_strict: ; GFX10PLUS-SDAG: ; %bb.0: ; GFX10PLUS-SDAG-NEXT: v_sub_f16_e64 v0, s2, s3 @@ -833,5 +1021,6 @@ declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x ha attributes #0 = { strictfp } attributes #1 = { inaccessiblememonly nounwind willreturn } ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GFX11: {{.*}} ; GFX8: {{.*}} ; GFX9: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f32.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f32.ll index 23dbe21379f7f..3e6db4d4ac6c8 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f32.ll @@ -8,6 +8,9 @@ ; RUN: llc -global-isel= -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s + define float @v_constained_fsub_f32_fpexcept_strict(float %x, float %y) #0 { ; GCN-LABEL: v_constained_fsub_f32_fpexcept_strict: ; GCN: ; %bb.0: @@ -20,6 +23,16 @@ define float @v_constained_fsub_f32_fpexcept_strict(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_sub_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } @@ -36,6 +49,16 @@ define float @v_constained_fsub_f32_fpexcept_ignore(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_sub_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: 
v_constained_fsub_f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret float %val } @@ -52,6 +75,16 @@ define float @v_constained_fsub_f32_fpexcept_maytrap(float %x, float %y) #0 { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_sub_f32_e32 v0, v0, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret float %val } @@ -76,6 +109,16 @@ define <2 x float> @v_constained_fsub_v2f32_fpexcept_strict(<2 x float> %x, <2 x ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_v2f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <2 x float> %val } @@ -100,6 +143,16 @@ 
define <2 x float> @v_constained_fsub_v2f32_fpexcept_ignore(<2 x float> %x, <2 x ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_v2f32_fpexcept_ignore: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %val } @@ -124,6 +177,16 @@ define <2 x float> @v_constained_fsub_v2f32_fpexcept_maytrap(<2 x float> %x, <2 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_v2f32_fpexcept_maytrap: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x float> %val } @@ -151,6 +214,17 @@ define <3 x float> @v_constained_fsub_v3f32_fpexcept_strict(<3 x float> %x, <3 x ; GFX11-NEXT: v_dual_sub_f32 v0, v0, v3 :: v_dual_sub_f32 v1, v1, v4 ; GFX11-NEXT: v_sub_f32_e32 v2, v2, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_v3f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: 
s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_dual_sub_f32 v0, v0, v3 :: v_dual_sub_f32 v1, v1, v4 +; GFX12-NEXT: v_sub_f32_e32 v2, v2, v5 +; GFX12-NEXT: s_setpc_b64 s[30:31] %val = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(<3 x float> %x, <3 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x float> %val } @@ -166,6 +240,13 @@ define amdgpu_ps float @s_constained_fsub_f32_fpexcept_strict(float inreg %x, fl ; GFX10PLUS: ; %bb.0: ; GFX10PLUS-NEXT: v_sub_f32_e64 v0, s2, s3 ; GFX10PLUS-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_constained_fsub_f32_fpexcept_strict: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_sub_f32 s0, s2, s3 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: ; return to shader part epilog %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val } @@ -182,6 +263,16 @@ define float @v_constained_fsub_f32_fpexcept_strict_fabs_lhs(float %x, float %y) ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_sub_f32_e64 v0, |v0|, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f32_fpexcept_strict_fabs_lhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e64 v0, |v0|, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %val = call float @llvm.experimental.constrained.fsub.f32(float %fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val @@ -199,6 +290,16 @@ define float @v_constained_fsub_f32_fpexcept_strict_fabs_rhs(float %x, float %y) ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: 
v_sub_f32_e64 v0, v0, |v1| ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f32_fpexcept_strict_fabs_rhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e64 v0, v0, |v1| +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.y = call float @llvm.fabs.f32(float %y) #0 %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %fabs.y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret float %val @@ -216,6 +317,16 @@ define float @v_constained_fsub_f32_fpexcept_strict_fneg_fabs_lhs(float %x, floa ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_sub_f32_e64 v0, -|v0|, v1 ; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_constained_fsub_f32_fpexcept_strict_fneg_fabs_lhs: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_f32_e64 v0, -|v0|, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] %fabs.x = call float @llvm.fabs.f32(float %x) #0 %neg.fabs.x = fneg float %fabs.x %val = call float @llvm.experimental.constrained.fsub.f32(float %neg.fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") @@ -228,3 +339,6 @@ declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x declare <3 x float> @llvm.experimental.constrained.fsub.v3f32(<3 x float>, <3 x float>, metadata, metadata) attributes #0 = { strictfp } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GFX12-GISEL: {{.*}} +; GFX12-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f64.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f64.ll index e7d136c377079..2937065b7970a 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f64.ll @@ -1,10 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s + ; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s -; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX10 %s -; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX10 %s + +; RUN: llc -global-isel=0 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s define double @v_constained_fsub_f64_fpexcept_strict(double %x, double %y) #0 { ; GCN-LABEL: v_constained_fsub_f64_fpexcept_strict: @@ -18,6 +20,12 @@ define double @v_constained_fsub_f64_fpexcept_strict(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double 
@llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret double %val } @@ -34,6 +42,12 @@ define double @v_constained_fsub_f64_fpexcept_ignore(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_f64_fpexcept_ignore: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret double %val } @@ -50,6 +64,12 @@ define double @v_constained_fsub_f64_fpexcept_maytrap(double %x, double %y) #0 { ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_f64_fpexcept_maytrap: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[2:3] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret double %val } @@ -68,6 +88,13 @@ define <2 x double> @v_constained_fsub_v2f64_fpexcept_strict(<2 x double> %x, <2 ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] ; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_v2f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] +; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata 
!"fpexcept.strict") ret <2 x double> %val } @@ -86,6 +113,13 @@ define <2 x double> @v_constained_fsub_v2f64_fpexcept_ignore(<2 x double> %x, <2 ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] ; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_v2f64_fpexcept_ignore: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] +; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %val } @@ -104,6 +138,13 @@ define <2 x double> @v_constained_fsub_v2f64_fpexcept_maytrap(<2 x double> %x, < ; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] ; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_v2f64_fpexcept_maytrap: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[4:5] +; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") ret <2 x double> %val } @@ -124,6 +165,14 @@ define <3 x double> @v_constained_fsub_v3f64_fpexcept_strict(<3 x double> %x, <3 ; GFX10-NEXT: v_add_f64 v[2:3], v[2:3], -v[8:9] ; GFX10-NEXT: v_add_f64 v[4:5], v[4:5], -v[10:11] ; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_constained_fsub_v3f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], -v[6:7] +; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], -v[8:9] +; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], -v[10:11] +; GFX11-NEXT: s_setpc_b64 s[30:31] %val = call <3 x double> 
@llvm.experimental.constrained.fsub.v3f64(<3 x double> %x, <3 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") ret <3 x double> %val } @@ -140,6 +189,11 @@ define amdgpu_ps <2 x float> @s_constained_fsub_f64_fpexcept_strict(double inreg ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_f64 v[0:1], s[2:3], -s[4:5] ; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: s_constained_fsub_f64_fpexcept_strict: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_add_f64 v[0:1], s[2:3], -s[4:5] +; GFX11-NEXT: ; return to shader part epilog %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") %cast = bitcast double %val to <2 x float> ret <2 x float> %cast diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll index f6922c75ff848..a0aee6c80703f 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll @@ -7,27 +7,36 @@ define amdgpu_kernel void @barrier_vmcnt_global(ptr addrspace(1) %arg) { ; GFX8-LABEL: barrier_vmcnt_global: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc +; GFX8-NEXT: flat_load_dword v4, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dword v2, v[0:1] -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 4, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: s_barrier -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: 
flat_store_dword v[0:1], v4 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: barrier_vmcnt_global: ; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: global_load_dword v1, v0, s[0:1] +; GFX9-NEXT: global_load_dword v2, v1, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, 1, v0 +; GFX9-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_mov_b32_e32 v3, s1 +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: global_store_dword v0, v1, s[0:1] offset:4 +; GFX9-NEXT: global_store_dword v[0:1], v2, off ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -48,20 +57,22 @@ bb: define amdgpu_kernel void @barrier_vscnt_global(ptr addrspace(1) %arg) { ; GFX8-LABEL: barrier_vscnt_global: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0 -; GFX8-NEXT: v_lshrrev_b64 v[1:2], 30, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0 +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v3, v2, vcc -; GFX8-NEXT: flat_store_dword v[1:2], v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, -4, v1 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, -1, v2, vcc -; GFX8-NEXT: v_mov_b32_e32 v2, 1 +; GFX8-NEXT: v_mov_b32_e32 v4, s1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc +; GFX8-NEXT: flat_store_dword v[2:3], v1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v3, 1 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc ; GFX8-NEXT: 
s_waitcnt vmcnt(0) ; GFX8-NEXT: s_barrier -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: flat_store_dword v[0:1], v3 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: barrier_vscnt_global: @@ -70,14 +81,18 @@ define amdgpu_kernel void @barrier_vscnt_global(ptr addrspace(1) %arg) { ; GFX9-NEXT: v_add_u32_e32 v2, 2, v0 ; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v0, v3, vcc -; GFX9-NEXT: v_mov_b32_e32 v0, 1 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc ; GFX9-NEXT: global_store_dword v[2:3], v1, off +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v3, 1 +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: global_store_dword v[2:3], v0, off offset:-4 +; GFX9-NEXT: global_store_dword v[0:1], v3, off ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -100,19 +115,22 @@ bb: define amdgpu_kernel void @barrier_vmcnt_vscnt_global(ptr addrspace(1) %arg) { ; GFX8-LABEL: barrier_vmcnt_vscnt_global: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0 -; GFX8-NEXT: v_lshrrev_b64 v[1:2], 30, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0 +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v3, v2, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, -8, v1 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, -1, v2, vcc -; GFX8-NEXT: flat_load_dword v3, v[3:4] -; GFX8-NEXT: flat_store_dword v[1:2], v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, -4, v1 -; GFX8-NEXT: 
v_addc_u32_e32 v1, vcc, -1, v2, vcc +; GFX8-NEXT: v_mov_b32_e32 v4, s1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc +; GFX8-NEXT: flat_store_dword v[2:3], v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc +; GFX8-NEXT: flat_load_dword v3, v[2:3] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: s_barrier ; GFX8-NEXT: flat_store_dword v[0:1], v3 @@ -124,15 +142,19 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_global(ptr addrspace(1) %arg) { ; GFX9-NEXT: v_add_u32_e32 v2, 2, v0 ; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v0, v3, vcc -; GFX9-NEXT: global_load_dword v0, v[2:3], off offset:-8 -; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc ; GFX9-NEXT: global_store_dword v[2:3], v1, off +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX9-NEXT: global_load_dword v3, v2, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: global_store_dword v[2:3], v0, off offset:-4 +; GFX9-NEXT: global_store_dword v[0:1], v3, off ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -157,30 +179,38 @@ bb: define amdgpu_kernel void @barrier_vmcnt_flat(ptr %arg) { ; GFX8-LABEL: barrier_vmcnt_flat: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; 
GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc +; GFX8-NEXT: flat_load_dword v4, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dword v2, v[0:1] -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 4, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX8-NEXT: s_barrier -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: flat_store_dword v[0:1], v4 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: barrier_vmcnt_flat: ; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v3, s1 +; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1 +; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc +; GFX9-NEXT: flat_load_dword v4, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX9-NEXT: flat_load_dword v2, v[0:1] +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: flat_store_dword v[0:1], v2 offset:4 +; GFX9-NEXT: flat_store_dword v[0:1], v4 ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -201,20 +231,22 @@ bb: define amdgpu_kernel void @barrier_vscnt_flat(ptr %arg) { ; GFX8-LABEL: barrier_vscnt_flat: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0 
-; GFX8-NEXT: v_lshrrev_b64 v[1:2], 30, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0 +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v3, v2, vcc -; GFX8-NEXT: flat_store_dword v[1:2], v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, -4, v1 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, -1, v2, vcc -; GFX8-NEXT: v_mov_b32_e32 v2, 1 +; GFX8-NEXT: v_mov_b32_e32 v4, s1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc +; GFX8-NEXT: flat_store_dword v[2:3], v1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v3, 1 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX8-NEXT: s_barrier -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: flat_store_dword v[0:1], v3 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: barrier_vscnt_flat: @@ -223,16 +255,18 @@ define amdgpu_kernel void @barrier_vscnt_flat(ptr %arg) { ; GFX9-NEXT: v_add_u32_e32 v2, 2, v0 ; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v0, v3, vcc -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, -4, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc ; GFX9-NEXT: flat_store_dword v[2:3], v1 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc -; GFX9-NEXT: v_mov_b32_e32 v2, 1 +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v3, 1 +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: 
flat_store_dword v[0:1], v2 +; GFX9-NEXT: flat_store_dword v[0:1], v3 ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -255,19 +289,22 @@ bb: define amdgpu_kernel void @barrier_vmcnt_vscnt_flat(ptr %arg) { ; GFX8-LABEL: barrier_vmcnt_vscnt_flat: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0 -; GFX8-NEXT: v_lshrrev_b64 v[1:2], 30, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0 +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v3, v2, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, -8, v1 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, -1, v2, vcc -; GFX8-NEXT: flat_load_dword v3, v[3:4] -; GFX8-NEXT: flat_store_dword v[1:2], v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, -4, v1 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, -1, v2, vcc +; GFX8-NEXT: v_mov_b32_e32 v4, s1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc +; GFX8-NEXT: flat_store_dword v[2:3], v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc +; GFX8-NEXT: flat_load_dword v3, v[2:3] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX8-NEXT: s_barrier ; GFX8-NEXT: flat_store_dword v[0:1], v3 @@ -279,18 +316,21 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat(ptr %arg) { ; GFX9-NEXT: v_add_u32_e32 v2, 2, v0 ; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 
v0, v3, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, -8, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc -; GFX9-NEXT: flat_load_dword v4, v[4:5] -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, -4, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc ; GFX9-NEXT: flat_store_dword v[2:3], v1 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc +; GFX9-NEXT: flat_load_dword v3, v[2:3] +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: flat_store_dword v[0:1], v4 +; GFX9-NEXT: flat_store_dword v[0:1], v3 ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -315,19 +355,22 @@ bb: define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) { ; GFX8-LABEL: barrier_vmcnt_vscnt_flat_workgroup: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0 -; GFX8-NEXT: v_lshrrev_b64 v[1:2], 30, v[0:1] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0 +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v3, v2, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, -8, v1 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, -1, v2, vcc -; GFX8-NEXT: flat_load_dword v3, v[3:4] -; GFX8-NEXT: flat_store_dword v[1:2], v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, -4, v1 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, -1, v2, vcc +; GFX8-NEXT: v_mov_b32_e32 v4, s1 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc +; GFX8-NEXT: flat_store_dword v[2:3], v1 +; 
GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc +; GFX8-NEXT: flat_load_dword v3, v[2:3] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX8-NEXT: s_barrier ; GFX8-NEXT: flat_store_dword v[0:1], v3 @@ -339,18 +382,21 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) { ; GFX9-NEXT: v_add_u32_e32 v2, 2, v0 ; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2] ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v0, v3, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, -8, v2 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc -; GFX9-NEXT: flat_load_dword v4, v[4:5] -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, -4, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc ; GFX9-NEXT: flat_store_dword v[2:3], v1 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc +; GFX9-NEXT: flat_load_dword v3, v[2:3] +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_barrier -; GFX9-NEXT: flat_store_dword v[0:1], v4 +; GFX9-NEXT: flat_store_dword v[0:1], v3 ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -375,25 +421,34 @@ bb: define amdgpu_kernel void @load_vmcnt_global(ptr addrspace(1) %arg) { ; GFX8-LABEL: load_vmcnt_global: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; 
GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc +; GFX8-NEXT: flat_load_dword v4, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dword v2, v[0:1] -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 4, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: flat_store_dword v[0:1], v4 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: load_vmcnt_global: ; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: global_load_dword v1, v0, s[0:1] +; GFX9-NEXT: global_load_dword v2, v1, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, 1, v0 +; GFX9-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_mov_b32_e32 v3, s1 +; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: global_store_dword v0, v1, s[0:1] offset:4 +; GFX9-NEXT: global_store_dword v[0:1], v2, off ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -411,28 +466,36 @@ bb: define amdgpu_kernel void @load_vmcnt_flat(ptr %arg) { ; GFX8-LABEL: load_vmcnt_flat: ; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 
0, v3, vcc +; GFX8-NEXT: flat_load_dword v4, v[1:2] +; GFX8-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dword v2, v[0:1] -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 4, v0 -; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: flat_store_dword v[0:1], v4 ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: load_vmcnt_flat: ; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v3, s1 +; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1 +; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc +; GFX9-NEXT: flat_load_dword v4, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: v_add_u32_e32 v2, 1, v0 +; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2] ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX9-NEXT: flat_load_dword v2, v[0:1] +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX9-NEXT: flat_store_dword v[0:1], v2 offset:4 +; GFX9-NEXT: flat_store_dword v[0:1], v4 ; GFX9-NEXT: s_endpgm bb: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll index 9e9fe1809c780..ff33cca0702ae 100644 --- a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll +++ b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 --o - %s | FileCheck -check-prefix=GCN 
%s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 --o - %s | FileCheck -check-prefix=GCN-O0 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -o - %s | FileCheck -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 -o - %s | FileCheck -check-prefix=GCN-O0 %s ; Test whole-wave register spilling. diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/gep-ce-two-uses.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/gep-ce-two-uses.ll index 4eda6353f47ed..8fba0a4187e81 100644 --- a/llvm/test/CodeGen/DirectX/CBufferAccess/gep-ce-two-uses.ll +++ b/llvm/test/CodeGen/DirectX/CBufferAccess/gep-ce-two-uses.ll @@ -17,12 +17,31 @@ define void @f(ptr %dst) { entry: ; CHECK: [[PTR:%.*]] = call ptr addrspace(2) @llvm.dx.resource.getpointer.{{.*}}(target("dx.CBuffer", %__cblayout_CB) {{%.*}}, i32 0) - ; CHECK: getelementptr inbounds nuw i8, ptr addrspace(2) [[PTR]], i32 16 + ; CHECK: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(2) [[PTR]], i32 16 + ; CHECK-COUNT-2: load float, ptr addrspace(2) [[GEP]] %a1 = load float, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @a1, i32 16), align 4 store float %a1, ptr %dst, align 32 - ; CHECK: [[PTR:%.*]] = call ptr addrspace(2) @llvm.dx.resource.getpointer.{{.*}}(target("dx.CBuffer", %__cblayout_CB) {{%.*}}, i32 0) - ; CHECK: getelementptr inbounds nuw i8, ptr addrspace(2) [[PTR]], i32 16 + %a2 = load float, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @a1, i32 16), align 4 + store float %a2, ptr %dst, align 32 + + ret void +} + +; CHECK: define void @g +define void @g(ptr %dst) { +entry: + ; CHECK: [[PTR1:%.*]] = call ptr addrspace(2) @llvm.dx.resource.getpointer.{{.*}}(target("dx.CBuffer", %__cblayout_CB) {{%.*}}, i32 0) + ; CHECK: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(2) [[PTR1]], i32 16 + ; CHECK: load float, ptr addrspace(2) [[GEP1]] + %a1 = load float, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @a1, i32 
16), align 4 + store float %a1, ptr %dst, align 32 + br label %next + +next: + ; CHECK: [[PTR2:%.*]] = call ptr addrspace(2) @llvm.dx.resource.getpointer.{{.*}}(target("dx.CBuffer", %__cblayout_CB) {{%.*}}, i32 0) + ; CHECK: [[GEP2:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(2) [[PTR2]], i32 16 + ; CHECK: load float, ptr addrspace(2) [[GEP2]] %a2 = load float, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @a1, i32 16), align 4 store float %a2, ptr %dst, align 32 diff --git a/llvm/test/CodeGen/LoongArch/lasx/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/lasx/rotl-rotr.ll new file mode 100644 index 0000000000000..6b8ab2cdb94e1 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/rotl-rotr.ll @@ -0,0 +1,248 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 + +define void @rotl_v32i8(ptr %dst, ptr %src, i8 signext %a0) nounwind { +; CHECK-LABEL: rotl_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.b $xr1, $a2 +; CHECK-NEXT: xvneg.b $xr1, $xr1 +; CHECK-NEXT: xvrotr.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <32 x i8>, ptr %src + %v1.ele = insertelement <32 x i8> poison, i8 %a0, i8 0 + %v1 = shufflevector <32 x i8> %v1.ele, <32 x i8> poison, <32 x i32> zeroinitializer + %v1.sub = sub <32 x i8> splat (i8 8), %v1 + %b = shl <32 x i8> %v0, %v1 + %c = lshr <32 x i8> %v0, %v1.sub + %d = or <32 x i8> %b, %c + store <32 x i8> %d, ptr %dst + ret void +} + +define void @rotr_v32i8(ptr %dst, ptr %src, i8 signext %a0) nounwind { +; CHECK-LABEL: rotr_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.b $xr1, $a2 +; CHECK-NEXT: xvrotr.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: 
ret + %v0 = load <32 x i8>, ptr %src + %v1.ele = insertelement <32 x i8> poison, i8 %a0, i8 0 + %v1 = shufflevector <32 x i8> %v1.ele, <32 x i8> poison, <32 x i32> zeroinitializer + %v1.sub = sub <32 x i8> splat (i8 8), %v1 + %b = lshr <32 x i8> %v0, %v1 + %c = shl <32 x i8> %v0, %v1.sub + %d = or <32 x i8> %b, %c + store <32 x i8> %d, ptr %dst + ret void +} + +define void @rotr_v32i8_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v32i8_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvrotri.b $xr0, $xr0, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <32 x i8>, ptr %src + %b = lshr <32 x i8> %v0, splat (i8 2) + %c = shl <32 x i8> %v0, splat (i8 6) + %d = or <32 x i8> %b, %c + store <32 x i8> %d, ptr %dst + ret void +} + +define void @rotl_v16i16(ptr %dst, ptr %src, i16 signext %a0) nounwind { +; CHECK-LABEL: rotl_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.h $xr1, $a2 +; CHECK-NEXT: xvneg.h $xr1, $xr1 +; CHECK-NEXT: xvrotr.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i16>, ptr %src + %v1.ele = insertelement <16 x i16> poison, i16 %a0, i16 0 + %v1 = shufflevector <16 x i16> %v1.ele, <16 x i16> poison, <16 x i32> zeroinitializer + %v1.sub = sub <16 x i16> splat (i16 16), %v1 + %b = shl <16 x i16> %v0, %v1 + %c = lshr <16 x i16> %v0, %v1.sub + %d = or <16 x i16> %b, %c + store <16 x i16> %d, ptr %dst + ret void +} + +define void @rotr_v16i16(ptr %dst, ptr %src, i16 signext %a0) nounwind { +; CHECK-LABEL: rotr_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.h $xr1, $a2 +; CHECK-NEXT: xvrotr.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i16>, ptr %src + %v1.ele = insertelement <16 x i16> poison, i16 %a0, i16 0 + %v1 = shufflevector <16 x i16> %v1.ele, <16 x i16> poison, <16 x i32> zeroinitializer + %v1.sub = sub <16 x i16> splat (i16 16), %v1 + %b = 
lshr <16 x i16> %v0, %v1 + %c = shl <16 x i16> %v0, %v1.sub + %d = or <16 x i16> %b, %c + store <16 x i16> %d, ptr %dst + ret void +} + +define void @rotr_v16i16_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v16i16_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvrotri.h $xr0, $xr0, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i16>, ptr %src + %b = lshr <16 x i16> %v0, splat (i16 2) + %c = shl <16 x i16> %v0, splat (i16 14) + %d = or <16 x i16> %b, %c + store <16 x i16> %d, ptr %dst + ret void +} + +define void @rotl_v8i32(ptr %dst, ptr %src, i32 signext %a0) nounwind { +; CHECK-LABEL: rotl_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.w $xr1, $a2 +; CHECK-NEXT: xvneg.w $xr1, $xr1 +; CHECK-NEXT: xvrotr.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i32>, ptr %src + %v1.ele = insertelement <8 x i32> poison, i32 %a0, i32 0 + %v1 = shufflevector <8 x i32> %v1.ele, <8 x i32> poison, <8 x i32> zeroinitializer + %v1.sub = sub <8 x i32> splat (i32 32), %v1 + %b = shl <8 x i32> %v0, %v1 + %c = lshr <8 x i32> %v0, %v1.sub + %d = or <8 x i32> %b, %c + store <8 x i32> %d, ptr %dst + ret void +} + +define void @rotr_v8i32(ptr %dst, ptr %src, i32 signext %a0) nounwind { +; CHECK-LABEL: rotr_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvreplgr2vr.w $xr1, $a2 +; CHECK-NEXT: xvrotr.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i32>, ptr %src + %v1.ele = insertelement <8 x i32> poison, i32 %a0, i32 0 + %v1 = shufflevector <8 x i32> %v1.ele, <8 x i32> poison, <8 x i32> zeroinitializer + %v1.sub = sub <8 x i32> splat (i32 32), %v1 + %b = lshr <8 x i32> %v0, %v1 + %c = shl <8 x i32> %v0, %v1.sub + %d = or <8 x i32> %b, %c + store <8 x i32> %d, ptr %dst + ret void +} + +define void @rotr_v8i32_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v8i32_imm: +; CHECK: # 
%bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvrotri.w $xr0, $xr0, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i32>, ptr %src + %b = lshr <8 x i32> %v0, splat (i32 2) + %c = shl <8 x i32> %v0, splat (i32 30) + %d = or <8 x i32> %b, %c + store <8 x i32> %d, ptr %dst + ret void +} + +define void @rotl_v4i64(ptr %dst, ptr %src, i64 %a0) nounwind { +; LA32-LABEL: rotl_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a1, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 2 +; LA32-NEXT: xvpermi.q $xr1, $xr1, 2 +; LA32-NEXT: xvneg.d $xr1, $xr1 +; LA32-NEXT: xvrotr.d $xr0, $xr0, $xr1 +; LA32-NEXT: xvst $xr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotl_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a1, 0 +; LA64-NEXT: xvreplgr2vr.d $xr1, $a2 +; LA64-NEXT: xvneg.d $xr1, $xr1 +; LA64-NEXT: xvrotr.d $xr0, $xr0, $xr1 +; LA64-NEXT: xvst $xr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <4 x i64>, ptr %src + %v1.ele = insertelement <4 x i64> poison, i64 %a0, i64 0 + %v1 = shufflevector <4 x i64> %v1.ele, <4 x i64> poison, <4 x i32> zeroinitializer + %v1.sub = sub <4 x i64> splat (i64 64), %v1 + %b = shl <4 x i64> %v0, %v1 + %c = lshr <4 x i64> %v0, %v1.sub + %d = or <4 x i64> %b, %c + store <4 x i64> %d, ptr %dst + ret void +} + +define void @rotr_v4i64(ptr %dst, ptr %src, i64 %a0) nounwind { +; LA32-LABEL: rotr_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a1, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 2 +; LA32-NEXT: xvpermi.q $xr1, $xr1, 2 +; LA32-NEXT: xvrotr.d $xr0, $xr0, $xr1 +; LA32-NEXT: xvst $xr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotr_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a1, 0 +; LA64-NEXT: xvreplgr2vr.d $xr1, $a2 +; LA64-NEXT: xvrotr.d $xr0, $xr0, $xr1 +; LA64-NEXT: xvst $xr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <4 x i64>, ptr %src + %v1.ele = insertelement <4 x i64> poison, i64 %a0, i64 0 + %v1 = shufflevector <4 x i64> %v1.ele, <4 x 
i64> poison, <4 x i32> zeroinitializer + %v1.sub = sub <4 x i64> splat (i64 64), %v1 + %b = lshr <4 x i64> %v0, %v1 + %c = shl <4 x i64> %v0, %v1.sub + %d = or <4 x i64> %b, %c + store <4 x i64> %d, ptr %dst + ret void +} + +define void @rotr_v4i64_imm(ptr %dst, ptr %src) nounwind { +; LA32-LABEL: rotr_v4i64_imm: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a1, 0 +; LA32-NEXT: xvrepli.w $xr1, -62 +; LA32-NEXT: xvrotr.d $xr0, $xr0, $xr1 +; LA32-NEXT: xvst $xr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotr_v4i64_imm: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a1, 0 +; LA64-NEXT: xvrotri.d $xr0, $xr0, 2 +; LA64-NEXT: xvst $xr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <4 x i64>, ptr %src + %b = lshr <4 x i64> %v0, splat (i64 2) + %c = shl <4 x i64> %v0, splat (i64 62) + %d = or <4 x i64> %b, %c + store <4 x i64> %d, ptr %dst + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/lsx/rotl-rotr.ll new file mode 100644 index 0000000000000..106a7b0e3f0a5 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/rotl-rotr.ll @@ -0,0 +1,246 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 + +define void @rotl_v16i8(ptr %dst, ptr %src, i8 signext %a0) nounwind { +; CHECK-LABEL: rotl_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.b $vr1, $a2 +; CHECK-NEXT: vneg.b $vr1, $vr1 +; CHECK-NEXT: vrotr.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %src + %v1.ele = insertelement <16 x i8> poison, i8 %a0, i8 0 + %v1 = shufflevector <16 x i8> %v1.ele, <16 x i8> poison, <16 x i32> zeroinitializer + %v1.sub = sub <16 x i8> splat (i8 8), %v1 + %b = shl <16 x i8> %v0, %v1 + %c = lshr <16 x i8> %v0, %v1.sub + %d = or 
<16 x i8> %b, %c + store <16 x i8> %d, ptr %dst + ret void +} + +define void @rotr_v16i8(ptr %dst, ptr %src, i8 signext %a0) nounwind { +; CHECK-LABEL: rotr_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.b $vr1, $a2 +; CHECK-NEXT: vrotr.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %src + %v1.ele = insertelement <16 x i8> poison, i8 %a0, i8 0 + %v1 = shufflevector <16 x i8> %v1.ele, <16 x i8> poison, <16 x i32> zeroinitializer + %v1.sub = sub <16 x i8> splat (i8 8), %v1 + %b = lshr <16 x i8> %v0, %v1 + %c = shl <16 x i8> %v0, %v1.sub + %d = or <16 x i8> %b, %c + store <16 x i8> %d, ptr %dst + ret void +} + +define void @rotr_v16i8_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v16i8_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vrotri.b $vr0, $vr0, 2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %src + %b = lshr <16 x i8> %v0, splat (i8 2) + %c = shl <16 x i8> %v0, splat (i8 6) + %d = or <16 x i8> %b, %c + store <16 x i8> %d, ptr %dst + ret void +} + +define void @rotl_v8i16(ptr %dst, ptr %src, i16 signext %a0) nounwind { +; CHECK-LABEL: rotl_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.h $vr1, $a2 +; CHECK-NEXT: vneg.h $vr1, $vr1 +; CHECK-NEXT: vrotr.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i16>, ptr %src + %v1.ele = insertelement <8 x i16> poison, i16 %a0, i16 0 + %v1 = shufflevector <8 x i16> %v1.ele, <8 x i16> poison, <8 x i32> zeroinitializer + %v1.sub = sub <8 x i16> splat (i16 16), %v1 + %b = shl <8 x i16> %v0, %v1 + %c = lshr <8 x i16> %v0, %v1.sub + %d = or <8 x i16> %b, %c + store <8 x i16> %d, ptr %dst + ret void +} + +define void @rotr_v8i16(ptr %dst, ptr %src, i16 signext %a0) nounwind { +; CHECK-LABEL: rotr_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.h $vr1, $a2 +; 
CHECK-NEXT: vrotr.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i16>, ptr %src + %v1.ele = insertelement <8 x i16> poison, i16 %a0, i16 0 + %v1 = shufflevector <8 x i16> %v1.ele, <8 x i16> poison, <8 x i32> zeroinitializer + %v1.sub = sub <8 x i16> splat (i16 16), %v1 + %b = lshr <8 x i16> %v0, %v1 + %c = shl <8 x i16> %v0, %v1.sub + %d = or <8 x i16> %b, %c + store <8 x i16> %d, ptr %dst + ret void +} + +define void @rotr_v8i16_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v8i16_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vrotri.h $vr0, $vr0, 2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i16>, ptr %src + %b = lshr <8 x i16> %v0, splat (i16 2) + %c = shl <8 x i16> %v0, splat (i16 14) + %d = or <8 x i16> %b, %c + store <8 x i16> %d, ptr %dst + ret void +} + +define void @rotl_v4i32(ptr %dst, ptr %src, i32 signext %a0) nounwind { +; CHECK-LABEL: rotl_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.w $vr1, $a2 +; CHECK-NEXT: vneg.w $vr1, $vr1 +; CHECK-NEXT: vrotr.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x i32>, ptr %src + %v1.ele = insertelement <4 x i32> poison, i32 %a0, i32 0 + %v1 = shufflevector <4 x i32> %v1.ele, <4 x i32> poison, <4 x i32> zeroinitializer + %v1.sub = sub <4 x i32> splat (i32 32), %v1 + %b = shl <4 x i32> %v0, %v1 + %c = lshr <4 x i32> %v0, %v1.sub + %d = or <4 x i32> %b, %c + store <4 x i32> %d, ptr %dst + ret void +} + +define void @rotr_v4i32(ptr %dst, ptr %src, i32 signext %a0) nounwind { +; CHECK-LABEL: rotr_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vreplgr2vr.w $vr1, $a2 +; CHECK-NEXT: vrotr.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x i32>, ptr %src + %v1.ele = insertelement <4 x i32> poison, i32 %a0, i32 0 + %v1 = shufflevector <4 x i32> %v1.ele, <4 x i32> poison, <4 x i32> 
zeroinitializer + %v1.sub = sub <4 x i32> splat (i32 32), %v1 + %b = lshr <4 x i32> %v0, %v1 + %c = shl <4 x i32> %v0, %v1.sub + %d = or <4 x i32> %b, %c + store <4 x i32> %d, ptr %dst + ret void +} + +define void @rotr_v4i32_imm(ptr %dst, ptr %src) nounwind { +; CHECK-LABEL: rotr_v4i32_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vrotri.w $vr0, $vr0, 2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x i32>, ptr %src + %b = lshr <4 x i32> %v0, splat (i32 2) + %c = shl <4 x i32> %v0, splat (i32 30) + %d = or <4 x i32> %b, %c + store <4 x i32> %d, ptr %dst + ret void +} + +define void @rotl_v2i64(ptr %dst, ptr %src, i64 %a0) nounwind { +; LA32-LABEL: rotl_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a1, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 2 +; LA32-NEXT: vneg.d $vr1, $vr1 +; LA32-NEXT: vrotr.d $vr0, $vr0, $vr1 +; LA32-NEXT: vst $vr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotl_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a1, 0 +; LA64-NEXT: vreplgr2vr.d $vr1, $a2 +; LA64-NEXT: vneg.d $vr1, $vr1 +; LA64-NEXT: vrotr.d $vr0, $vr0, $vr1 +; LA64-NEXT: vst $vr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <2 x i64>, ptr %src + %v1.ele = insertelement <2 x i64> poison, i64 %a0, i64 0 + %v1 = shufflevector <2 x i64> %v1.ele, <2 x i64> poison, <2 x i32> zeroinitializer + %v1.sub = sub <2 x i64> splat (i64 64), %v1 + %b = shl <2 x i64> %v0, %v1 + %c = lshr <2 x i64> %v0, %v1.sub + %d = or <2 x i64> %b, %c + store <2 x i64> %d, ptr %dst + ret void +} + +define void @rotr_v2i64(ptr %dst, ptr %src, i64 %a0) nounwind { +; LA32-LABEL: rotr_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a1, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 2 +; LA32-NEXT: vrotr.d $vr0, $vr0, $vr1 +; LA32-NEXT: vst $vr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotr_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a1, 0 +; LA64-NEXT: vreplgr2vr.d $vr1, $a2 +; LA64-NEXT: 
vrotr.d $vr0, $vr0, $vr1 +; LA64-NEXT: vst $vr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <2 x i64>, ptr %src + %v1.ele = insertelement <2 x i64> poison, i64 %a0, i64 0 + %v1 = shufflevector <2 x i64> %v1.ele, <2 x i64> poison, <2 x i32> zeroinitializer + %v1.sub = sub <2 x i64> splat (i64 64), %v1 + %b = lshr <2 x i64> %v0, %v1 + %c = shl <2 x i64> %v0, %v1.sub + %d = or <2 x i64> %b, %c + store <2 x i64> %d, ptr %dst + ret void +} + +define void @rotr_v2i64_imm(ptr %dst, ptr %src) nounwind { +; LA32-LABEL: rotr_v2i64_imm: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a1, 0 +; LA32-NEXT: vrepli.w $vr1, -62 +; LA32-NEXT: vrotr.d $vr0, $vr0, $vr1 +; LA32-NEXT: vst $vr0, $a0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: rotr_v2i64_imm: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a1, 0 +; LA64-NEXT: vrotri.d $vr0, $vr0, 2 +; LA64-NEXT: vst $vr0, $a0, 0 +; LA64-NEXT: ret + %v0 = load <2 x i64>, ptr %src + %b = lshr <2 x i64> %v0, splat (i64 2) + %c = shl <2 x i64> %v0, splat (i64 62) + %d = or <2 x i64> %b, %c + store <2 x i64> %d, ptr %dst + ret void +} diff --git a/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir b/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir new file mode 100644 index 0000000000000..6542508ede116 --- /dev/null +++ b/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir @@ -0,0 +1,12 @@ +# RUN: llc < %s -O0 -mtriple=aarch64-none-linux-gnu -mattr=+pauth -run-pass irtranslator -x mir | \ +# RUN: llc -x mir -run-pass legalizer | FileCheck %s + +--- | + @ds = external global i8 + + define i64 @pauth_sign_zero(i64 %p) { + ; CHECK: G_INTRINSIC intrinsic(@llvm.ptrauth.sign), %0(s64), 0, %2(s64), deactivation-symbol @ds + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed + } +... 
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt index bd26938c93cb7..74ef1e608d4ba 100644 --- a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt +++ b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt @@ -1737,8 +1737,8 @@ Key: TILELOADDRST: [ 0.00 0.00 ] Key: TILELOADDRS_EVEX: [ 0.00 0.00 ] Key: TILELOADDT: [ 0.00 0.00 ] Key: TILELOADD_EVEX: [ 0.00 0.00 ] -Key: TILEMOVROWrre: [ 0.00 0.00 ] -Key: TILEMOVROWrri: [ 0.00 0.00 ] +Key: TILEMOVROWrte: [ 0.00 0.00 ] +Key: TILEMOVROWrti: [ 0.00 0.00 ] Key: TILERELEASE: [ 0.00 0.00 ] Key: TILESTORED: [ 0.00 0.00 ] Key: TILESTORED_EVEX: [ 0.00 0.00 ] diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt index a3810c19e584a..1ba4f13e69c92 100644 --- a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt +++ b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt @@ -1737,8 +1737,8 @@ Key: TILELOADDRST: [ 0.00 0.00 ] Key: TILELOADDRS_EVEX: [ 0.00 0.00 ] Key: TILELOADDT: [ 0.00 0.00 ] Key: TILELOADD_EVEX: [ 0.00 0.00 ] -Key: TILEMOVROWrre: [ 0.00 0.00 ] -Key: TILEMOVROWrri: [ 0.00 0.00 ] +Key: TILEMOVROWrte: [ 0.00 0.00 ] +Key: TILEMOVROWrti: [ 0.00 0.00 ] Key: TILERELEASE: [ 0.00 0.00 ] Key: TILESTORED: [ 0.00 0.00 ] Key: TILESTORED_EVEX: [ 0.00 0.00 ] diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll index 4246aa545dd0e..ae6418ee97ba5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll @@ -121,8 +121,6 @@ define double @fdiv_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.sqrt.f64(double) - define double @fsqrt_d(double %a) nounwind { ; CHECKIFD-LABEL: fsqrt_d: ; CHECKIFD: # %bb.0: @@ -150,8 +148,6 @@ define double 
@fsqrt_d(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @fsgnj_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fsgnj_d: ; CHECKIFD: # %bb.0: @@ -261,8 +257,6 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ret double %2 } -declare double @llvm.fabs.f64(double) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. define double @fabs_d(double %a, double %b) nounwind { @@ -305,8 +299,6 @@ define double @fabs_d(double %a, double %b) nounwind { ret double %3 } -declare double @llvm.minnum.f64(double, double) - define double @fmin_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmin_d: ; CHECKIFD: # %bb.0: @@ -334,8 +326,6 @@ define double @fmin_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @fmax_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmax_d: ; CHECKIFD: # %bb.0: @@ -363,8 +353,6 @@ define double @fmax_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.minimumnum.f64(double, double) - define double @fminimumnum_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fminimumnum_d: ; CHECKIFD: # %bb.0: @@ -392,8 +380,6 @@ define double @fminimumnum_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximumnum.f64(double, double) - define double @fmaximumnum_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmaximumnum_d: ; CHECKIFD: # %bb.0: @@ -421,8 +407,6 @@ define double @fmaximumnum_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fmadd_d(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmadd_d: ; CHECKIFD: # %bb.0: @@ -771,7 +755,6 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind { ret double %neg } - define double @fnmadd_nsz(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: 
fnmadd_nsz: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll index 4b0acda839ad6..906e4bc41d960 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll @@ -10,8 +10,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare double @llvm.sqrt.f64(double) - define double @sqrt_f64(double %a) nounwind { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -81,8 +79,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret double %1 } -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; RV32IFD-LABEL: sin_f64: ; RV32IFD: # %bb.0: @@ -123,8 +119,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.cos.f64(double) - define double @cos_f64(double %a) nounwind { ; RV32IFD-LABEL: cos_f64: ; RV32IFD: # %bb.0: @@ -257,8 +251,6 @@ define double @sincos_f64(double %a) nounwind { ret double %3 } -declare double @llvm.pow.f64(double, double) - define double @pow_f64(double %a, double %b) nounwind { ; RV32IFD-LABEL: pow_f64: ; RV32IFD: # %bb.0: @@ -299,8 +291,6 @@ define double @pow_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.exp.f64(double) - define double @exp_f64(double %a) nounwind { ; RV32IFD-LABEL: exp_f64: ; RV32IFD: # %bb.0: @@ -341,8 +331,6 @@ define double @exp_f64(double %a) nounwind { ret double %1 } -declare double @llvm.exp2.f64(double) - define double @exp2_f64(double %a) nounwind { ; RV32IFD-LABEL: exp2_f64: ; RV32IFD: # %bb.0: @@ -423,8 +411,6 @@ define double @exp10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log.f64(double) - define double @log_f64(double %a) nounwind { ; RV32IFD-LABEL: log_f64: ; RV32IFD: # %bb.0: @@ -465,8 +451,6 @@ define double @log_f64(double %a) nounwind { ret double %1 } -declare 
double @llvm.log10.f64(double) - define double @log10_f64(double %a) nounwind { ; RV32IFD-LABEL: log10_f64: ; RV32IFD: # %bb.0: @@ -507,8 +491,6 @@ define double @log10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log2.f64(double) - define double @log2_f64(double %a) nounwind { ; RV32IFD-LABEL: log2_f64: ; RV32IFD: # %bb.0: @@ -549,8 +531,6 @@ define double @log2_f64(double %a) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fma_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -578,8 +558,6 @@ define double @fma_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fmuladd.f64(double, double, double) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -621,8 +599,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs_f64(double %a) nounwind { ; CHECKIFD-LABEL: fabs_f64: ; CHECKIFD: # %bb.0: @@ -644,8 +620,6 @@ define double @fabs_f64(double %a) nounwind { ret double %1 } -declare double @llvm.minnum.f64(double, double) - define double @minnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: minnum_f64: ; CHECKIFD: # %bb.0: @@ -673,8 +647,6 @@ define double @minnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @maxnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: maxnum_f64: ; CHECKIFD: # %bb.0: @@ -702,8 +674,6 @@ define double @maxnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @copysign_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: copysign_f64: ; CHECKIFD: # %bb.0: @@ -731,8 +701,6 @@ define double @copysign_f64(double %a, double %b) nounwind { ret double %1 } -declare 
double @llvm.floor.f64(double) - define double @floor_f64(double %a) nounwind { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -773,8 +741,6 @@ define double @floor_f64(double %a) nounwind { ret double %1 } -declare double @llvm.ceil.f64(double) - define double @ceil_f64(double %a) nounwind { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -815,8 +781,6 @@ define double @ceil_f64(double %a) nounwind { ret double %1 } -declare double @llvm.trunc.f64(double) - define double @trunc_f64(double %a) nounwind { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -857,8 +821,6 @@ define double @trunc_f64(double %a) nounwind { ret double %1 } -declare double @llvm.rint.f64(double) - define double @rint_f64(double %a) nounwind { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -899,8 +861,6 @@ define double @rint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.nearbyint.f64(double) - define double @nearbyint_f64(double %a) nounwind { ; RV32IFD-LABEL: nearbyint_f64: ; RV32IFD: # %bb.0: @@ -941,8 +901,6 @@ define double @nearbyint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.round.f64(double) - define double @round_f64(double %a) nounwind { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -983,8 +941,6 @@ define double @round_f64(double %a) nounwind { ret double %1 } -declare double @llvm.roundeven.f64(double) - define double @roundeven_f64(double %a) nounwind { ; RV32IFD-LABEL: roundeven_f64: ; RV32IFD: # %bb.0: @@ -1025,7 +981,6 @@ define double @roundeven_f64(double %a) nounwind { ret double %1 } -declare i1 @llvm.is.fpclass.f64(double, i32) define i1 @isnan_d_fpclass(double %x) { ; CHECKIFD-LABEL: isnan_d_fpclass: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll index 3222849641baf..06eeaa8d4e503 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll @@ -121,8 +121,6 @@ define float 
@fdiv_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.sqrt.f32(float) - define float @fsqrt_s(float %a) nounwind { ; CHECKIF-LABEL: fsqrt_s: ; CHECKIF: # %bb.0: @@ -150,8 +148,6 @@ define float @fsqrt_s(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @fsgnj_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fsgnj_s: ; CHECKIF: # %bb.0: @@ -270,8 +266,6 @@ define float @fsgnjn_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.fabs.f32(float) - define float @fabs_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fabs_s: ; CHECKIF: # %bb.0: @@ -311,8 +305,6 @@ define float @fabs_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.minimumnum.f32(float, float) - define float @fminimumnum_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fminimumnum_s: ; CHECKIF: # %bb.0: @@ -340,8 +332,6 @@ define float @fminimumnum_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximumnum.f32(float, float) - define float @fmaximumnum_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmaximumnum_s: ; CHECKIF: # %bb.0: @@ -369,8 +359,6 @@ define float @fmaximumnum_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.minnum.f32(float, float) - define float @fmin_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmin_s: ; CHECKIF: # %bb.0: @@ -398,8 +386,6 @@ define float @fmin_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @fmax_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmax_s: ; CHECKIF: # %bb.0: @@ -427,8 +413,6 @@ define float @fmax_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; CHECKIF-LABEL: fmadd_s: ; CHECKIF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll index 31a78d4f72ceb..8ced3155c58ec 
100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i8 @llvm.abs.i8(i8, i1 immarg) -declare i16 @llvm.abs.i16(i16, i1 immarg) -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i8 @abs8(i8 %x) { ; RV32I-LABEL: abs8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll index 48d72108335e4..cd1e95e88ab8a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare void @llvm.va_copy(ptr, ptr) define void @test_va_copy(ptr %dest_list, ptr %src_list) { ; RV32I-LABEL: name: test_va_copy ; RV32I: bb.1 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll index 74961d12c1c85..ad1544db84391 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll @@ -25,9 +25,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -453,7 +450,6 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind { ret i32 %1 } - define i32 @va1_va_arg(ptr %fmt, ...) 
nounwind { ; RV32-LABEL: name: va1_va_arg ; RV32: bb.1 (%ir-block.0): @@ -1249,8 +1245,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define i32 @va4_va_copy(i32 %argno, ...) nounwind { ; ILP32-LABEL: name: va4_va_copy ; ILP32: bb.1 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll index 46d1661983c6a..f70e27906474a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll @@ -1749,7 +1749,6 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshl.i32(i32, i32, i32) define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotl_64_mask_shared: @@ -1984,7 +1983,6 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshl.i64(i64, i64, i64) define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotr_32_mask_shared: @@ -2050,7 +2048,6 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshr.i32(i32, i32, i32) define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotr_64_mask_shared: @@ -2291,7 +2288,6 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshr.i64(i64, i64, i64) define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotl_32_mask_multiple: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll index da95481a5e588..83cf228402295 100644 --- 
a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll @@ -111,8 +111,6 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind { ret i64 %xor } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: rol_i32: ; RV32I: # %bb.0: @@ -133,8 +131,6 @@ define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: rol_i64: ; CHECK: # %bb.0: @@ -187,8 +183,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: ror_i32: ; RV32I: # %bb.0: @@ -209,8 +203,6 @@ define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. 
-declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: ror_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll index 0b376dd779887..a59a46bdd0e7f 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -57,8 +55,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -153,8 +149,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -197,8 +191,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -276,8 +268,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define i32 @ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: ctpop_i32: ; RV32I: # %bb.0: @@ -312,8 +302,6 @@ define i32 @ctpop_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV32I-LABEL: ctpop_i64: ; RV32I: # %bb.0: @@ -737,8 +725,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV32I-LABEL: abs_i32: ; RV32I: # %bb.0: @@ -756,8 +742,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; 
CHECK: # %bb.0: @@ -805,8 +789,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -831,8 +813,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll index b7f84ba696c26..8a21889334fb8 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll @@ -102,8 +102,6 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind { ret i64 %xor } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: @@ -161,8 +159,6 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: rol_i64: ; RV64I: # %bb.0: @@ -180,8 +176,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: @@ -239,8 +233,6 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: ror_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll index 2dd3bb3119dd3..daac8440e5763 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck 
%s -check-prefix=RV64ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - ; FIXME: We don't need the shift pair before the beqz for RV64I. define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: @@ -318,8 +316,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -393,8 +389,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -595,8 +589,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -659,8 +651,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define signext i32 @ctpop_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctpop_i32: ; RV64I: # %bb.0: @@ -756,8 +746,6 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind { ret i32 %1 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV64I-LABEL: ctpop_i64: ; RV64I: # %bb.0: @@ -1028,8 +1016,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV64I-LABEL: abs_i32: ; RV64I: # %bb.0: @@ -1067,8 +1053,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; RV64I-LABEL: abs_i64: ; RV64I: # %bb.0: @@ -1116,8 +1100,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1173,8 +1155,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 
@llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll index 21f14d941993b..c3cc472c4706f 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - 
define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 
+667,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll index 9e092e4337526..1dd3a831903b5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll @@ -10,12 +10,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -34,13 
+28,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -60,12 +47,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -84,13 +65,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -110,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -134,13 +102,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -160,12 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,13 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -260,12 +195,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -284,13 +213,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -311,12 +233,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -335,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -361,12 +270,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - 
define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -385,13 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -411,12 +307,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -435,13 +325,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -461,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -485,13 +362,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -511,12 +381,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -535,13 +399,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -562,12 +419,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -586,13 +437,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -612,12 +456,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -636,13 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -662,12 +493,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -686,13 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +530,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -736,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -763,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -787,13 +586,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -813,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -837,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -863,12 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( 
%0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -887,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -913,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -937,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -963,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -987,13 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1013,12 +753,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.f16( - , - , - half, - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1063,12 +790,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +808,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1113,12 +827,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1137,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1163,12 +864,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1187,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1213,12 
+901,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1237,13 +919,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1263,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1287,13 +956,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1313,12 +975,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1337,13 +993,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1363,12 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1387,13 +1030,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1413,12 +1049,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1463,12 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1487,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll index 3a74bcd06222b..943fe3e201c4b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | 
FileCheck %s -declare @llvm.riscv.vle.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: 
# %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare 
@llvm.riscv.vle.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: ; CHECK: # 
%bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -950,11 +686,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -993,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1013,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1036,11 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1056,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1079,11 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1099,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1122,11 +810,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1142,13 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1165,11 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f16( - , - ptr, - 
iXLen); - define @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1185,13 +856,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1208,11 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1228,13 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1294,11 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1314,13 +949,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1337,11 
+965,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1357,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1380,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1400,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1423,11 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1443,13 +1042,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1466,11 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1486,13 +1073,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; 
CHECK: # %bb.0: # %entry @@ -1509,11 +1089,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1529,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1552,11 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1572,13 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll index 12279639893bc..ba1e365084165 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -global-isel -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vlm.nxv1i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv2i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv4i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vlm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv8i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv16i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv32i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv64i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll index 5cb55f15c7c8c..48b162078ea86 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 
+697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1221,14 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1268,14 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1293,12 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr 
%0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1315,14 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll index fafd45b7579e8..6b676890bcb65 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 
+410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -840,14 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -865,12 +613,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -889,14 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -914,12 +648,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -937,14 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -962,12 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -985,14 +699,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1010,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1033,14 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1058,12 +750,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1081,14 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1106,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1129,14 +801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1176,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1201,12 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1317,14 +933,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1342,12 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1364,14 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1389,12 +983,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1412,14 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1437,12 +1017,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,14 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1052,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1510,14 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ 
-1535,12 +1087,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1559,14 +1105,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1584,12 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1607,14 +1139,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1632,12 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1655,14 +1173,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1680,12 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1703,14 +1207,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1728,12 +1224,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1751,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1776,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1799,14 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1824,12 +1292,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1847,14 +1309,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1872,12 +1326,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1894,14 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1919,12 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1941,14 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1966,12 +1392,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1988,14 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2013,12 
+1425,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2035,14 +1441,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2060,12 +1458,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2082,14 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2107,12 +1491,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2129,14 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2154,12 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2177,14 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2202,12 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2225,14 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2250,12 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2274,14 +1610,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2299,12 +1627,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2323,14 +1645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( - , - ptr, - 
, - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2348,12 +1662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2372,14 +1680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2397,12 +1697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2420,14 +1714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2445,12 +1731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2469,14 +1749,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2494,12 +1766,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2518,14 +1784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2543,12 +1801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2567,14 +1819,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2592,12 +1836,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2614,14 +1852,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2639,12 +1869,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2661,14 +1885,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2686,12 +1902,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2708,14 +1918,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2733,12 +1935,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2755,14 +1951,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2780,12 +1968,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2802,14 +1984,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2827,12 +2001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2849,14 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2874,12 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2897,14 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2922,12 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2945,14 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2970,12 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2994,14 +2120,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3019,12 +2137,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3043,14 +2155,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3068,12 +2172,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3092,14 +2190,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3117,12 +2207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3140,14 +2224,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3165,12 +2241,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3189,14 +2259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3214,12 +2276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3238,14 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3263,12 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3287,14 +2329,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3312,12 +2346,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3334,14 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3359,12 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3381,14 +2395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3406,12 +2412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3428,14 +2428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3453,12 +2445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3475,14 +2461,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3522,14 +2494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3547,12 +2511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3569,14 +2527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3594,12 +2544,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3616,14 +2560,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3641,12 +2577,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3664,14 +2594,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,12 +2611,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3712,14 +2628,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3737,12 +2645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -3760,14 +2662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3785,12 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3809,14 +2697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3834,12 +2714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3858,14 +2732,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3883,12 +2749,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3907,14 +2767,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3932,12 +2784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3955,14 +2801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3980,12 +2818,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4003,14 +2835,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4028,12 +2852,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4052,14 +2870,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4077,12 +2887,6 @@ entry: ret 
%a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4101,14 +2905,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4126,12 +2922,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4150,14 +2940,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4175,12 +2957,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4198,14 +2974,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4223,12 +2991,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,14 +3009,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4272,12 +3026,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4296,14 +3044,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4321,12 +3061,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4345,14 +3079,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4370,12 +3096,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4393,14 +3113,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4418,12 +3130,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4441,14 +3147,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3164,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4489,14 +3181,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4514,12 +3198,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4538,14 +3216,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4563,12 +3233,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4587,14 +3251,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4612,12 +3268,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4636,14 +3286,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4661,12 +3303,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4684,14 +3320,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4709,12 +3337,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4732,14 +3354,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4757,12 +3371,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4781,14 +3389,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4806,12 +3406,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4830,14 +3424,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4855,12 +3441,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4879,14 +3459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4904,12 +3476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4927,14 +3493,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4952,12 +3510,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4976,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5001,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5025,14 +3563,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5050,12 +3580,6 @@ entry: ret 
%a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5074,14 +3598,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll index 14abfa1b44ca7..871fc31f6f33b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vlse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ 
-261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a 
} -declare @llvm.riscv.vlse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i32( - , - ptr, - iXLen, 
- , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define 
@intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -872,14 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -897,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -919,14 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( 
%0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -944,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -966,14 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -991,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1013,14 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1038,12 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1060,14 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1107,14 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1154,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1179,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1201,14 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1226,12 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1248,14 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1273,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1295,14 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1320,12 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1342,14 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1367,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1389,14 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1414,12 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8: ; 
CHECK: # %bb.0: # %entry @@ -1436,14 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1461,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1483,14 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1508,12 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1530,14 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1555,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1577,14 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1602,12 +1126,6 @@ 
entry: ret %a } -declare @llvm.riscv.vlse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1624,14 +1142,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1649,12 +1159,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1671,14 +1175,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1696,12 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1718,14 +1208,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll index 916af2556c6a8..0c4afaf0a7397 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported 
with RV32. -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 
+697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1221,14 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1268,14 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1293,12 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr 
%0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1315,14 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll index 8dd32a1d640dc..ce6ba6c3209d8 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 
+410,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -840,14 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -865,12 +613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -889,14 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -914,12 +648,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -937,14 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -962,12 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -985,14 +699,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1010,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1033,14 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1058,12 +750,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1081,14 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1106,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1129,14 +801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1176,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1201,12 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1317,14 +933,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1342,12 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1364,14 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1389,12 +983,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1412,14 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1437,12 +1017,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,14 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1052,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1510,14 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ 
-1535,12 +1087,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1559,14 +1105,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1584,12 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1607,14 +1139,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1632,12 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1655,14 +1173,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1680,12 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1703,14 +1207,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1728,12 +1224,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1751,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1776,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1799,14 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1824,12 +1292,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1847,14 +1309,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1872,12 +1326,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1894,14 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1919,12 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1941,14 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1966,12 +1392,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1988,14 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2013,12 
+1425,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2035,14 +1441,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2060,12 +1458,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2082,14 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2107,12 +1491,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2129,14 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2154,12 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2177,14 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2202,12 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2225,14 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2250,12 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2274,14 +1610,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2299,12 +1627,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2323,14 +1645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( - , - ptr, - 
, - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2348,12 +1662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2372,14 +1680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2397,12 +1697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2420,14 +1714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2445,12 +1731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2469,14 +1749,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2494,12 +1766,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2518,14 +1784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2543,12 +1801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2567,14 +1819,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2592,12 +1836,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2614,14 +1852,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2639,12 +1869,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2661,14 +1885,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2686,12 +1902,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2708,14 +1918,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2733,12 +1935,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2755,14 +1951,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2780,12 +1968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2802,14 +1984,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2827,12 +2001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2849,14 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2874,12 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2897,14 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2922,12 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2945,14 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2970,12 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2994,14 +2120,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3019,12 +2137,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3043,14 +2155,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3068,12 +2172,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3092,14 +2190,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3117,12 +2207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3140,14 +2224,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3165,12 +2241,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3189,14 +2259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3214,12 +2276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3238,14 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3263,12 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3287,14 +2329,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3312,12 +2346,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3334,14 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3359,12 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3381,14 +2395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3406,12 +2412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3428,14 +2428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3453,12 +2445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3475,14 +2461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3522,14 +2494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3547,12 +2511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3569,14 +2527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3594,12 +2544,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3616,14 +2560,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3641,12 +2577,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3664,14 +2594,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,12 +2611,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3712,14 +2628,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3737,12 +2645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -3760,14 +2662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3785,12 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3809,14 +2697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3834,12 +2714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3858,14 +2732,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3883,12 +2749,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3907,14 +2767,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3932,12 +2784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3955,14 +2801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3980,12 +2818,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4003,14 +2835,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4028,12 +2852,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4052,14 +2870,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4077,12 +2887,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4101,14 +2905,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4126,12 +2922,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4150,14 +2940,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4175,12 +2957,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4198,14 +2974,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4223,12 +2991,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,14 +3009,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4272,12 +3026,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4296,14 +3044,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4321,12 +3061,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4345,14 +3079,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4370,12 +3096,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4393,14 +3113,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4418,12 +3130,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4441,14 +3147,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3164,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4489,14 +3181,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4514,12 +3198,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4538,14 +3216,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4563,12 +3233,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4587,14 +3251,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4612,12 +3268,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4636,14 +3286,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4661,12 +3303,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4684,14 +3320,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4709,12 +3337,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4732,14 +3354,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4757,12 +3371,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4781,14 +3389,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4806,12 +3406,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4830,14 +3424,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4855,12 +3441,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4879,14 +3459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4904,12 +3476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4927,14 +3493,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4952,12 +3510,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4976,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5001,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5025,14 +3563,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5050,12 +3580,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5074,14 +3598,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll index 785d9fc6a7970..adf73e35ccdc5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -62,11 +51,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -82,12 +66,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: 
# %entry @@ -104,11 +82,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -124,12 +97,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -146,11 +113,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -166,12 +128,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -208,12 +159,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -230,11 +175,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -250,12 +190,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f64( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -272,11 +206,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -292,12 +221,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -314,11 +237,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -334,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -376,12 +283,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -398,11 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -418,12 +314,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,11 +330,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -460,12 +345,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -482,11 +361,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -502,12 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -524,11 +392,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -544,12 +407,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,11 +423,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f32( - , - ptr, - 
iXLen); - define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -586,12 +438,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -608,11 +454,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -628,12 +469,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -650,11 +485,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -670,12 +500,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,12 +531,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -734,11 +547,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +562,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,11 +578,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -796,12 +593,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -818,11 +609,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -838,12 +624,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -860,11 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -880,12 +655,6 @@ entry: ret void } -declare void 
@llvm.riscv.vse.mask.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -902,11 +671,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,12 +686,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +702,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -964,12 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -986,11 +733,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1006,12 +748,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +764,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, 
ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1048,12 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1070,11 +795,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1090,12 +810,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1112,11 +826,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +841,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1154,11 +857,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1174,12 +872,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1196,11 
+888,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1216,12 +903,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1238,11 +919,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1258,12 +934,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1280,11 +950,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1300,12 +965,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1322,11 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +996,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1364,11 +1012,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1384,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1406,11 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1426,12 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1448,11 +1074,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1468,12 +1089,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1490,11 +1105,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry 
@@ -1510,12 +1120,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1532,11 +1136,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv64i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1552,12 +1151,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll index 5237536c07740..0a7e74398ae4b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -global-isel -verify-machineinstrs | FileCheck %s -declare void @llvm.riscv.vsm.nxv1i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv1i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv2i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv2i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv4i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv4i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv8i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv8i1( %0, ptr %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv16i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv16i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv32i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv32i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv64i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv64i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -95,11 +81,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - ; Make sure we can use the vsetvli from the producing instruction. define void @test_vsetvli_i16( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: @@ -117,11 +98,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define void @test_vsetvli_i32( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll index 4963d91a14988..ba4851e18b6fb 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll index 7ea2e1734e5a2..334265feaf19b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: 
; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( - , - ptr, - , - 
iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll index b7609ff5fd1cd..94285ae6c2615 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -779,13 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f32( - , - ptr, - iXLen, - , 
- iXLen); - define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 
@@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16: ; 
CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1493,12 +1077,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1515,13 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1539,12 +1110,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1561,13 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1585,12 +1143,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1607,13 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1631,12 +1176,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1653,13 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1677,12 +1209,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1699,13 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll index 9bd272a368d20..80aeb52857036 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -779,13 
+565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll index 7cd15454d40b9..660b78e4685e4 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: 
; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( - , - ptr, - , - 
iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll index d634cc9f6395c..6e9b263a1a6b4 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll @@ -7,9 +7,6 @@ ; Basic shift support is tested as part of ALU.ll. This file ensures that ; shifts which may not be supported natively are lowered properly. 
-declare i64 @llvm.fshr.i64(i64, i64, i64) -declare i128 @llvm.fshr.i128(i128, i128, i128) - define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: lshr64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll index bb96ba7e5b1fb..6345011e3d9ce 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll @@ -31,9 +31,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -1214,8 +1211,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define iXLen @va4_va_copy(i32 %argno, ...) nounwind { ; RV32-LABEL: va4_va_copy: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll index 41f73f51fe7b6..10d40ca4c774e 100644 --- a/llvm/test/CodeGen/RISCV/abds-neg.ll +++ b/llvm/test/CodeGen/RISCV/abds-neg.ll @@ -2670,18 +2670,3 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind { ret i128 %nabs } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.smax.i8(i8, i8) -declare i16 @llvm.smax.i16(i16, i16) -declare i32 @llvm.smax.i32(i32, i32) -declare i64 @llvm.smax.i64(i64, i64) - -declare i8 @llvm.smin.i8(i8, i8) -declare i16 @llvm.smin.i16(i16, i16) -declare i32 @llvm.smin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll index f11a9c854c465..b89885bc32dba 100644 --- a/llvm/test/CodeGen/RISCV/abds.ll +++ b/llvm/test/CodeGen/RISCV/abds.ll @@ -2701,18 +2701,3 @@ define i128 @abd_select_i128(i128 %a, 
i128 %b) nounwind { ret i128 %sub } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.smax.i8(i8, i8) -declare i16 @llvm.smax.i16(i16, i16) -declare i32 @llvm.smax.i32(i32, i32) -declare i64 @llvm.smax.i64(i64, i64) - -declare i8 @llvm.smin.i8(i8, i8) -declare i16 @llvm.smin.i16(i16, i16) -declare i32 @llvm.smin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll index 713b52f53e3d9..e362c1819f4be 100644 --- a/llvm/test/CodeGen/RISCV/abdu-neg.ll +++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll @@ -1941,18 +1941,3 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind { ret i128 %sel } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.umax.i8(i8, i8) -declare i16 @llvm.umax.i16(i16, i16) -declare i32 @llvm.umax.i32(i32, i32) -declare i64 @llvm.umax.i64(i64, i64) - -declare i8 @llvm.umin.i8(i8, i8) -declare i16 @llvm.umin.i16(i16, i16) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.umin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll index 6ef172a6cd618..37c46e3370521 100644 --- a/llvm/test/CodeGen/RISCV/abdu.ll +++ b/llvm/test/CodeGen/RISCV/abdu.ll @@ -2114,21 +2114,6 @@ define i128 @abd_select_i128(i128 %a, i128 %b) nounwind { ret i128 %sub } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.umax.i8(i8, i8) -declare i16 @llvm.umax.i16(i16, i16) -declare i32 @llvm.umax.i32(i32, i32) -declare i64 @llvm.umax.i64(i64, i64) - -declare i8 @llvm.umin.i8(i8, i8) -declare i16 @llvm.umin.i16(i16, 
i16) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.umin.i64(i64, i64) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} ; NOZBB: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll index ff0d1e75c746c..153c97faddec8 100644 --- a/llvm/test/CodeGen/RISCV/addcarry.ll +++ b/llvm/test/CodeGen/RISCV/addcarry.ll @@ -4,9 +4,6 @@ ; Test ADDCARRY node expansion on a target that does not currently support ADDCARRY. ; Signed fixed point multiplication eventually expands down to an ADDCARRY. -declare i64 @llvm.smul.fix.i64 (i64, i64, i32) -declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) - define i64 @addcarry(i64 %x, i64 %y) nounwind { ; RISCV32-LABEL: addcarry: ; RISCV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll index 975fc93c830af..9ea5471e4c633 100644 --- a/llvm/test/CodeGen/RISCV/alloca.ll +++ b/llvm/test/CodeGen/RISCV/alloca.ll @@ -29,9 +29,6 @@ define void @simple_alloca(i32 %n) nounwind { ret void } -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) - define void @scoped_alloca(i32 %n) nounwind { ; RV32I-LABEL: scoped_alloca: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/allow-check.ll b/llvm/test/CodeGen/RISCV/allow-check.ll index 0ddb5266db8f5..1e0716c9af008 100644 --- a/llvm/test/CodeGen/RISCV/allow-check.ll +++ b/llvm/test/CodeGen/RISCV/allow-check.ll @@ -17,8 +17,6 @@ entry: ret i1 %allow } -declare i1 @llvm.allow.runtime.check(metadata) nounwind - define i1 @test_ubsan() local_unnamed_addr { ; CHECK-LABEL: test_ubsan: ; CHECK: # %bb.0: # %entry @@ -29,4 +27,3 @@ entry: ret i1 %allow } -declare i1 @llvm.allow.ubsan.check(i8) nounwind diff --git a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll index 551d8864033f3..557b4b7c2afa2 100644 --- a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll +++ 
b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll @@ -2,11 +2,6 @@ ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32I %s -declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b) - define i1 @sadd(i32 %a, i32 %b, ptr %c) nounwind { ; RV32I-LABEL: sadd: ; RV32I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll index 7fe5fa7365eb5..74ff20db12b62 100644 --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -157,7 +157,6 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind { ret i32 %1 } - define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic: ; RV32I: # %bb.0: @@ -7508,7 +7507,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.smax.i32(i32, i32) define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_min_i32_monotonic_crossbb: @@ -7720,7 +7718,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.smin.i32(i32, i32) define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_umax_i32_monotonic_crossbb: @@ -7904,7 +7901,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.umax.i32(i32, i32) define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_umin_i32_monotonic_crossbb: @@ -8122,7 +8118,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.umin.i32(i32, i32) define signext i32 @cmpxchg_i32_monotonic_crossbb(ptr %ptr, i32 signext %cmp, i32 signext %val, i1 zeroext %c) 
nounwind { ; RV32I-LABEL: cmpxchg_i32_monotonic_crossbb: diff --git a/llvm/test/CodeGen/RISCV/bfloat-arith.ll b/llvm/test/CodeGen/RISCV/bfloat-arith.ll index 871b43e61df50..c3bd658a57229 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-arith.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-arith.ll @@ -55,8 +55,6 @@ define bfloat @fdiv_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.sqrt.bf16(bfloat) - define bfloat @fsqrt_bf16(bfloat %a) nounwind { ; CHECK-LABEL: fsqrt_bf16: ; CHECK: # %bb.0: @@ -68,8 +66,6 @@ define bfloat @fsqrt_bf16(bfloat %a) nounwind { ret bfloat %1 } -declare bfloat @llvm.copysign.bf16(bfloat, bfloat) - define bfloat @fsgnj_bf16(bfloat %a, bfloat %b) nounwind { ; RV32IZFBFMIN-LABEL: fsgnj_bf16: ; RV32IZFBFMIN: # %bb.0: @@ -159,8 +155,6 @@ define bfloat @fsgnjn_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %3 } -declare bfloat @llvm.fabs.bf16(bfloat) - define bfloat @fabs_bf16(bfloat %a, bfloat %b) nounwind { ; RV32IZFBFMIN-LABEL: fabs_bf16: ; RV32IZFBFMIN: # %bb.0: @@ -199,8 +193,6 @@ define bfloat @fabs_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %3 } -declare bfloat @llvm.minnum.bf16(bfloat, bfloat) - define bfloat @fmin_bf16(bfloat %a, bfloat %b) nounwind { ; CHECK-LABEL: fmin_bf16: ; CHECK: # %bb.0: @@ -213,8 +205,6 @@ define bfloat @fmin_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.maxnum.bf16(bfloat, bfloat) - define bfloat @fmax_bf16(bfloat %a, bfloat %b) nounwind { ; CHECK-LABEL: fmax_bf16: ; CHECK: # %bb.0: @@ -227,8 +217,6 @@ define bfloat @fmax_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.fma.bf16(bfloat, bfloat, bfloat) - define bfloat @fmadd_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind { ; CHECK-LABEL: fmadd_bf16: ; CHECK: # %bb.0: @@ -345,7 +333,6 @@ define bfloat @fnmadd_s_3(bfloat %a, bfloat %b, bfloat %c) nounwind { ret bfloat %neg } - define bfloat @fnmadd_nsz(bfloat %a, bfloat %b, bfloat %c) nounwind { ; CHECK-LABEL: fnmadd_nsz: ; CHECK: 
# %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll index 73ff888e44b3b..3de0753369c01 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll @@ -119,7 +119,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.bf16(bfloat %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.bf16(bfloat) define i16 @fcvt_ui_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_ui_bf16: @@ -209,7 +208,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.bf16(bfloat %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.bf16(bfloat) define i32 @fcvt_w_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_w_bf16: @@ -291,7 +289,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.bf16(bfloat %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.bf16(bfloat) define i32 @fcvt_wu_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_bf16: @@ -419,7 +416,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.bf16(bfloat %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.bf16(bfloat) define i64 @fcvt_l_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_l_bf16: @@ -609,7 +605,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.bf16(bfloat %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.bf16(bfloat) define i64 @fcvt_lu_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_lu_bf16: @@ -759,7 +754,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.bf16(bfloat %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.bf16(bfloat) define bfloat @fcvt_bf16_si(i16 %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_bf16_si: @@ -1685,7 +1679,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.bf16(bfloat %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.bf16(bfloat) define zeroext i8 @fcvt_wu_s_i8(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_s_i8: @@ -1771,7 +1764,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.bf16(bfloat %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.bf16(bfloat) define zeroext 
i32 @fcvt_wu_bf16_sat_zext(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_bf16_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/bitreverse-shift.ll b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll index 92610f22c4b72..83e7d1e250c5e 100644 --- a/llvm/test/CodeGen/RISCV/bitreverse-shift.ll +++ b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll @@ -8,11 +8,6 @@ ; fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x) ; fold (bitreverse(shl (bitreverse c), x)) -> (srl c, x) -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind { ; CHECK-LABEL: test_bitreverse_srli_bitreverse_i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll index 1605e686e9177..9450eea5a6666 100644 --- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll @@ -12,14 +12,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64ZB,RV64ZBKB -declare i16 @llvm.bswap.i16(i16) -declare i32 @llvm.bswap.i32(i32) -declare i64 @llvm.bswap.i64(i64) -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i16 @test_bswap_i16(i16 %a) nounwind { ; RV32I-LABEL: test_bswap_i16: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bswap-shift.ll b/llvm/test/CodeGen/RISCV/bswap-shift.ll index 23f32ae327fd9..63fb69b0285e7 100644 --- a/llvm/test/CodeGen/RISCV/bswap-shift.ll +++ b/llvm/test/CodeGen/RISCV/bswap-shift.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64ZB -declare i16 @llvm.bswap.i16(i16) -declare i32 @llvm.bswap.i32(i32) -declare i64 @llvm.bswap.i64(i64) - define i16 
@test_bswap_srli_7_bswap_i16(i16 %a) nounwind { ; RV32ZB-LABEL: test_bswap_srli_7_bswap_i16: ; RV32ZB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/clear-cache.ll b/llvm/test/CodeGen/RISCV/clear-cache.ll index d598a98a330e9..6f26e82f07bd3 100644 --- a/llvm/test/CodeGen/RISCV/clear-cache.ll +++ b/llvm/test/CodeGen/RISCV/clear-cache.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv32-unknown-linux-musl < %s | FileCheck --check-prefix=RV32-LINUX %s ; RUN: llc -mtriple=riscv64-unknown-linux-musl < %s | FileCheck --check-prefix=RV64-LINUX %s -declare void @llvm.clear_cache(ptr, ptr) - define void @foo(ptr %a, ptr %b) nounwind { ; RV32-LABEL: foo: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/copy-frameindex.mir b/llvm/test/CodeGen/RISCV/copy-frameindex.mir index 31ffc3f0f83c6..9a307d44cf21a 100644 --- a/llvm/test/CodeGen/RISCV/copy-frameindex.mir +++ b/llvm/test/CodeGen/RISCV/copy-frameindex.mir @@ -15,8 +15,6 @@ ret void } - declare void @llvm.dbg.value(metadata, metadata, metadata) - !llvm.dbg.cu = !{!0} !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4) diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll index 53de36f1699a9..e2b8840518b0d 100644 --- a/llvm/test/CodeGen/RISCV/copysign-casts.ll +++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll @@ -37,10 +37,6 @@ ; Test fcopysign scenarios where the sign argument is casted to the type of the ; magnitude argument. Those casts can be folded away by the DAGCombiner. 
-declare double @llvm.copysign.f64(double, double) -declare float @llvm.copysign.f32(float, float) -declare half @llvm.copysign.f16(half, half) - define double @fold_promote_d_s(double %a, float %b) nounwind { ; RV32I-LABEL: fold_promote_d_s: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll index e6b22b2b9deea..976c57e422761 100644 --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -16,19 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64XTHEADBB -declare i8 @llvm.cttz.i8(i8, i1) -declare i16 @llvm.cttz.i16(i16, i1) -declare i32 @llvm.cttz.i32(i32, i1) -declare i64 @llvm.cttz.i64(i64, i1) -declare i8 @llvm.ctlz.i8(i8, i1) -declare i16 @llvm.ctlz.i16(i16, i1) -declare i32 @llvm.ctlz.i32(i32, i1) -declare i64 @llvm.ctlz.i64(i64, i1) -declare i8 @llvm.ctpop.i8(i8) -declare i16 @llvm.ctpop.i16(i16) -declare i32 @llvm.ctpop.i32(i32) -declare i64 @llvm.ctpop.i64(i64) - define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32_NOZBB-LABEL: test_cttz_i8: ; RV32_NOZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll index cb213172c6c88..e92ff1a1b1b40 100644 --- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll +++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll @@ -106,9 +106,6 @@ define signext i32 @ctz_dereferencing_pointer(ptr %b) nounwind { ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - - - entry: %0 = load i64, ptr %b, align 8 %1 = tail call i64 @llvm.cttz.i64(i64 %0, i1 true) @@ -196,9 +193,6 @@ define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = load i32, ptr %b, align 8 %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true) @@ -281,9 +275,6 @@ define signext i32 @ctz1(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: 
ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -364,9 +355,6 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp ne i32 %x, 0 @@ -444,9 +432,6 @@ define signext i32 @ctz2(i32 signext %x) nounwind { ; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false) ret i32 %0 @@ -522,9 +507,6 @@ define signext i32 @ctz3(i32 signext %x) nounwind { ; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false) ret i32 %0 @@ -626,9 +608,6 @@ define signext i32 @ctz4(i64 %b) nounwind { ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - - - entry: %0 = tail call i64 @llvm.cttz.i64(i64 %b, i1 true) %1 = icmp eq i64 %b, 0 @@ -773,9 +752,6 @@ define signext i32 @ctlz(i64 %b) nounwind { ; RV64I-NEXT: srli a0, a0, 58 ; RV64I-NEXT: ret - - - entry: %0 = tail call i64 @llvm.ctlz.i64(i64 %b, i1 true) %1 = icmp eq i64 %b, 0 @@ -857,9 +833,6 @@ define signext i32 @ctz5(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -940,9 +913,6 @@ define signext i32 @ctz6(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -1030,9 +1000,6 @@ define signext i32 @globalVar() nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = load i32, ptr @global_x, align 4 %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true) @@ -1803,6 +1770,3 @@ define i32 @test_ctlz_select_i32(i32 %0) { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1 immarg) -declare i32 @llvm.cttz.i32(i32, i1 immarg) -declare i64 @llvm.ctlz.i64(i64, i1 immarg) diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll 
b/llvm/test/CodeGen/RISCV/double-arith-strict.ll index 4e48e54b3ca81..0071f3c168964 100644 --- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll @@ -52,7 +52,6 @@ define double @fadd_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) define double @fsub_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fsub_d: @@ -90,7 +89,6 @@ define double @fsub_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata) define double @fmul_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fmul_d: @@ -128,7 +126,6 @@ define double @fmul_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata) define double @fdiv_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fdiv_d: @@ -166,7 +163,6 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) define double @fsqrt_d(double %a) nounwind strictfp { ; CHECKIFD-LABEL: fsqrt_d: @@ -204,7 +200,6 @@ define double @fsqrt_d(double %a) nounwind strictfp { %1 = call double 
@llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) define double @fmin_d(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: fmin_d: @@ -263,7 +258,6 @@ define double @fmin_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) strictfp define double @fmax_d(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: fmax_d: @@ -322,7 +316,6 @@ define double @fmax_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) strictfp define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fmadd_d: @@ -360,7 +353,6 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) strictfp define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp { ; RV32IFD-LABEL: fmsub_d: diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll index f960bc19c57c3..ec66d17f96980 100644 --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -165,8 +165,6 @@ define double @fdiv_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.sqrt.f64(double) - define double @fsqrt_d(double %a) 
nounwind { ; CHECKIFD-LABEL: fsqrt_d: ; CHECKIFD: # %bb.0: @@ -204,8 +202,6 @@ define double @fsqrt_d(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @fsgnj_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fsgnj_d: ; CHECKIFD: # %bb.0: @@ -344,8 +340,6 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ret double %2 } -declare double @llvm.fabs.f64(double) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. define double @fabs_d(double %a, double %b) nounwind { @@ -402,8 +396,6 @@ define double @fabs_d(double %a, double %b) nounwind { ret double %3 } -declare double @llvm.minnum.f64(double, double) - define double @fmin_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmin_d: ; CHECKIFD: # %bb.0: @@ -441,8 +433,6 @@ define double @fmin_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @fmax_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmax_d: ; CHECKIFD: # %bb.0: @@ -480,8 +470,6 @@ define double @fmax_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fmadd_d(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmadd_d: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll index 14193bf4cb169..d4bd69b06a298 100644 --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -61,8 +61,6 @@ define double @fneg(double %a) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs(double %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -101,8 +99,6 @@ define double @fabs(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - ; 
DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if floating point isn't supported. A combine could be written to ; do the same even when f64 is legal. diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll index 9a5e357b05a17..eb31c5a110cd2 100644 --- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll @@ -56,7 +56,6 @@ define float @fcvt_s_d(double %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) define double @fcvt_d_s(float %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_s: @@ -94,7 +93,6 @@ define double @fcvt_d_s(float %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata) define i32 @fcvt_w_d(double %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_w_d: @@ -132,7 +130,6 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) ; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case ; because fptosi will produce poison if the result doesn't fit into an i32. @@ -172,7 +169,6 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -262,7 +258,6 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata) define double @fcvt_d_w_load(ptr %p) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_load: @@ -344,7 +339,6 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata) define double @fcvt_d_wu_load(ptr %p) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_load: @@ -438,7 +432,6 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata) define i64 @fcvt_lu_d(double %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_lu_d: @@ -489,7 +482,6 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata) define double @fcvt_d_l(i64 %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_d_l: @@ -540,7 +532,6 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata) define double @fcvt_d_lu(i64 %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_d_lu: @@ -591,7 +582,6 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, 
metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata) define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_i8: @@ -629,7 +619,6 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata) define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_i8: @@ -667,7 +656,6 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata) define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_i16: @@ -705,7 +693,6 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata) define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_i16: @@ -743,7 +730,6 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata) ; Make sure we select W version of addi on RV64. 
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp { diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll index c3e729800616d..eb03f8ee1d532 100644 --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -251,7 +251,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f64(double %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f64(double) ; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case ; because fptosi will produce poison if the result doesn't fit into an i32. @@ -460,7 +459,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f64(double %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f64(double) define double @fcvt_d_w(i32 %a) nounwind { ; CHECKIFD-LABEL: fcvt_d_w: @@ -885,7 +883,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f64(double %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.f64(double) define i64 @fcvt_lu_d(double %a) nounwind { ; RV32IFD-LABEL: fcvt_lu_d: @@ -1077,7 +1074,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f64(double %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f64(double) define i64 @fmv_x_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fmv_x_d: @@ -1783,7 +1779,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f64(double %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f64(double) define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind { ; RV32IFD-LABEL: fcvt_wu_s_i16: @@ -1954,7 +1949,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f64(double %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f64(double) define signext i8 @fcvt_w_s_i8(double %a) nounwind { ; RV32IFD-LABEL: fcvt_w_s_i8: @@ -2158,7 +2152,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f64(double %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f64(double) define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind { ; @@ -2327,7 +2320,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f64(double %a) 
ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f64(double) define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind { ; RV32IFD-LABEL: fcvt_wu_d_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll index b1c63af3e7e07..610f34dba7397 100644 --- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll @@ -55,7 +55,6 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fcmp_ogt: @@ -871,7 +870,6 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll index fddb86de58f51..117a00dce4b10 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll @@ -18,8 +18,6 @@ ; RUN: -verify-machineinstrs -disable-strictnode-mutation \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) - define double @sqrt_f64(double %a) nounwind strictfp { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -57,8 +55,6 @@ define double @sqrt_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata) - define double @powi_f64(double %a, i32 %b) nounwind strictfp { ; RV32IFD-LABEL: powi_f64: ; RV32IFD: # %bb.0: @@ -120,8 +116,6 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp { ret double %1 } -declare double 
@llvm.experimental.constrained.sin.f64(double, metadata, metadata) - define double @sin_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: sin_f64: ; RV32IFD: # %bb.0: @@ -180,8 +174,6 @@ define double @sin_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata) - define double @cos_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: cos_f64: ; RV32IFD: # %bb.0: @@ -375,8 +367,6 @@ define double @sincos_f64(double %a) nounwind strictfp { ret double %3 } -declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata) - define double @tan_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: tan_f64: ; RV32IFD: # %bb.0: @@ -609,8 +599,6 @@ define double @atan_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata) - define double @atan2_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: atan2_f64: ; RV32IFD: # %bb.0: @@ -843,8 +831,6 @@ define double @tanh_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata) - define double @pow_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: pow_f64: ; RV32IFD: # %bb.0: @@ -903,8 +889,6 @@ define double @pow_f64(double %a, double %b) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata) - define double @exp_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: exp_f64: ; RV32IFD: # %bb.0: @@ -963,8 +947,6 @@ define double @exp_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata) - define double @exp2_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: exp2_f64: ; RV32IFD: # %bb.0: @@ -1023,8 +1005,6 @@ define double @exp2_f64(double %a) nounwind strictfp { ret double %1 } -declare double 
@llvm.experimental.constrained.log.f64(double, metadata, metadata) - define double @log_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log_f64: ; RV32IFD: # %bb.0: @@ -1083,8 +1063,6 @@ define double @log_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata) - define double @log10_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log10_f64: ; RV32IFD: # %bb.0: @@ -1143,8 +1121,6 @@ define double @log10_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata) - define double @log2_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log2_f64: ; RV32IFD: # %bb.0: @@ -1203,8 +1179,6 @@ define double @log2_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) - define double @fma_f64(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -1242,8 +1216,6 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -1295,8 +1267,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) - define double @minnum_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: minnum_f64: ; RV32IFD: # %bb.0: @@ -1355,8 +1325,6 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) - define double @maxnum_f64(double %a, double %b) nounwind strictfp { ; 
RV32IFD-LABEL: maxnum_f64: ; RV32IFD: # %bb.0: @@ -1432,8 +1400,6 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp { ; ret double %1 ; } -declare double @llvm.experimental.constrained.floor.f64(double, metadata) - define double @floor_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -1492,8 +1458,6 @@ define double @floor_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.ceil.f64(double, metadata) - define double @ceil_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -1552,8 +1516,6 @@ define double @ceil_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) - define double @trunc_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -1612,8 +1574,6 @@ define double @trunc_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata) - define double @rint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -1672,8 +1632,6 @@ define double @rint_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata) - define double @nearbyint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: nearbyint_f64: ; RV32IFD: # %bb.0: @@ -1732,8 +1690,6 @@ define double @nearbyint_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.round.f64(double, metadata) - define double @round_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -1792,8 +1748,6 @@ define double @round_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) - define double @roundeven_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: 
roundeven_f64: ; RV32IFD: # %bb.0: @@ -1852,8 +1806,6 @@ define double @roundeven_f64(double %a) nounwind strictfp { ret double %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f64(double, metadata, metadata) - define iXLen @lrint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: lrint_f64: ; RV32IFD: # %bb.0: @@ -1896,8 +1848,6 @@ define iXLen @lrint_f64(double %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f64(double, metadata) - define iXLen @lround_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: lround_f64: ; RV32IFD: # %bb.0: @@ -1940,8 +1890,6 @@ define iXLen @lround_f64(double %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata) - define i64 @llrint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: llrint_f64: ; RV32IFD: # %bb.0: @@ -1992,8 +1940,6 @@ define i64 @llrint_f64(double %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata) - define i64 @llround_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: llround_f64: ; RV32IFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index aaa08b577c4f4..81e6d84af17cb 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -16,8 +16,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s -declare double @llvm.sqrt.f64(double) - define double @sqrt_f64(double %a) nounwind { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -55,8 +53,6 @@ define double @sqrt_f64(double %a) nounwind { ret double %1 } -declare double @llvm.powi.f64.i32(double, i32) - define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-LABEL: powi_f64: ; RV32IFD: # %bb.0: @@ -113,8 +109,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret 
double %1 } -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; CHECKIFD-LABEL: sin_f64: ; CHECKIFD: # %bb.0: @@ -154,8 +148,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.cos.f64(double) - define double @cos_f64(double %a) nounwind { ; CHECKIFD-LABEL: cos_f64: ; CHECKIFD: # %bb.0: @@ -330,8 +322,6 @@ define double @sincos_f64(double %a) nounwind { ret double %3 } -declare double @llvm.pow.f64(double, double) - define double @pow_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: pow_f64: ; CHECKIFD: # %bb.0: @@ -371,8 +361,6 @@ define double @pow_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.exp.f64(double) - define double @exp_f64(double %a) nounwind { ; CHECKIFD-LABEL: exp_f64: ; CHECKIFD: # %bb.0: @@ -412,8 +400,6 @@ define double @exp_f64(double %a) nounwind { ret double %1 } -declare double @llvm.exp2.f64(double) - define double @exp2_f64(double %a) nounwind { ; CHECKIFD-LABEL: exp2_f64: ; CHECKIFD: # %bb.0: @@ -492,8 +478,6 @@ define double @exp10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log.f64(double) - define double @log_f64(double %a) nounwind { ; CHECKIFD-LABEL: log_f64: ; CHECKIFD: # %bb.0: @@ -533,8 +517,6 @@ define double @log_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log10.f64(double) - define double @log10_f64(double %a) nounwind { ; CHECKIFD-LABEL: log10_f64: ; CHECKIFD: # %bb.0: @@ -574,8 +556,6 @@ define double @log10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log2.f64(double) - define double @log2_f64(double %a) nounwind { ; CHECKIFD-LABEL: log2_f64: ; CHECKIFD: # %bb.0: @@ -615,8 +595,6 @@ define double @log2_f64(double %a) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fma_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -654,8 +632,6 @@ define double @fma_f64(double %a, 
double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fmuladd.f64(double, double, double) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -707,8 +683,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs_f64(double %a) nounwind { ; CHECKIFD-LABEL: fabs_f64: ; CHECKIFD: # %bb.0: @@ -740,8 +714,6 @@ define double @fabs_f64(double %a) nounwind { ret double %1 } -declare double @llvm.minnum.f64(double, double) - define double @minnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: minnum_f64: ; CHECKIFD: # %bb.0: @@ -779,8 +751,6 @@ define double @minnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @maxnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: maxnum_f64: ; CHECKIFD: # %bb.0: @@ -818,8 +788,6 @@ define double @maxnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @copysign_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: copysign_f64: ; CHECKIFD: # %bb.0: @@ -857,8 +825,6 @@ define double @copysign_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.floor.f64(double) - define double @floor_f64(double %a) nounwind { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -923,8 +889,6 @@ define double @floor_f64(double %a) nounwind { ret double %1 } -declare double @llvm.ceil.f64(double) - define double @ceil_f64(double %a) nounwind { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -989,8 +953,6 @@ define double @ceil_f64(double %a) nounwind { ret double %1 } -declare double @llvm.trunc.f64(double) - define double @trunc_f64(double %a) nounwind { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -1055,8 +1017,6 @@ define double @trunc_f64(double %a) nounwind { ret double %1 } -declare 
double @llvm.rint.f64(double) - define double @rint_f64(double %a) nounwind { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -1121,8 +1081,6 @@ define double @rint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.nearbyint.f64(double) - define double @nearbyint_f64(double %a) nounwind { ; CHECKIFD-LABEL: nearbyint_f64: ; CHECKIFD: # %bb.0: @@ -1162,8 +1120,6 @@ define double @nearbyint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.round.f64(double) - define double @round_f64(double %a) nounwind { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -1228,8 +1184,6 @@ define double @round_f64(double %a) nounwind { ret double %1 } -declare double @llvm.roundeven.f64(double) - define double @roundeven_f64(double %a) nounwind { ; RV32IFD-LABEL: roundeven_f64: ; RV32IFD: # %bb.0: @@ -1294,8 +1248,6 @@ define double @roundeven_f64(double %a) nounwind { ret double %1 } -declare iXLen @llvm.lrint.iXLen.f64(double) - define iXLen @lrint_f64(double %a) nounwind { ; RV32IFD-LABEL: lrint_f64: ; RV32IFD: # %bb.0: @@ -1338,9 +1290,6 @@ define iXLen @lrint_f64(double %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f64(double) -declare i64 @llvm.lround.i64.f64(double) - define iXLen @lround_f64(double %a) nounwind { ; RV32IFD-LABEL: lround_f64: ; RV32IFD: # %bb.0: @@ -1420,8 +1369,6 @@ define i32 @lround_i32_f64(double %a) nounwind { ret i32 %1 } -declare i64 @llvm.llrint.i64.f64(double) - define i64 @llrint_f64(double %a) nounwind { ; RV32IFD-LABEL: llrint_f64: ; RV32IFD: # %bb.0: @@ -1472,8 +1419,6 @@ define i64 @llrint_f64(double %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f64(double) - define i64 @llround_f64(double %a) nounwind { ; RV32IFD-LABEL: llround_f64: ; RV32IFD: # %bb.0: @@ -1524,7 +1469,6 @@ define i64 @llround_f64(double %a) nounwind { ret i64 %1 } -declare i1 @llvm.is.fpclass.f64(double, i32) define i1 @isnan_d_fpclass(double %x) { ; CHECKIFD-LABEL: isnan_d_fpclass: ; CHECKIFD: # %bb.0: @@ -1611,8 +1555,6 
@@ define double @tan_f64(double %a) nounwind { ret double %1 } -declare double @llvm.maximumnum.f64(double, double) - define double @maximumnum_double(double %x, double %y) { ; CHECKIFD-LABEL: maximumnum_double: ; CHECKIFD: # %bb.0: @@ -1658,8 +1600,6 @@ define double @maximumnum_double(double %x, double %y) { ret double %z } -declare double @llvm.minimumnum.f64(double, double) - define double @minimumnum_double(double %x, double %y) { ; CHECKIFD-LABEL: minimumnum_double: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll index 6202e92b4dc65..8b509e901e833 100644 --- a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s -declare double @llvm.minimum.f64(double, double) - define double @fminimum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fminimum_f64: ; CHECKIFD: # %bb.0: @@ -75,8 +73,6 @@ define double @fminimum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximum.f64(double, double) - define double @fmaximum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmaximum_f64: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll index a574e68671a74..1fb3d34907caa 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll @@ -1502,13 +1502,3 @@ define i64 @test_rint_ui64(double %x) nounwind { ret i64 %b } -declare double @llvm.floor.f64(double) -declare double @llvm.ceil.f64(double) -declare double @llvm.trunc.f64(double) -declare double @llvm.round.f64(double) -declare double @llvm.roundeven.f64(double) -declare double @llvm.rint.f64(double) -declare i32 @llvm.fptosi.sat.i32.f64(double) -declare i64 @llvm.fptosi.sat.i64.f64(double) 
-declare i32 @llvm.fptoui.sat.i32.f64(double) -declare i64 @llvm.fptoui.sat.i64.f64(double) diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll index 6dd24c056e386..43fe87111d7a3 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll @@ -1388,8 +1388,3 @@ define double @test_roundeven_double(double %x) { ret double %a } -declare double @llvm.floor.f64(double) -declare double @llvm.ceil.f64(double) -declare double @llvm.trunc.f64(double) -declare double @llvm.round.f64(double) -declare double @llvm.roundeven.f64(double) diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll index f17c63ddb6cae..a93ec86e363fc 100644 --- a/llvm/test/CodeGen/RISCV/double-zfa.ll +++ b/llvm/test/CodeGen/RISCV/double-zfa.ll @@ -183,8 +183,6 @@ define double @loadfpimm18() { ret double 0x8010000000000000 } -declare double @llvm.minimum.f64(double, double) - define double @fminm_d(double %a, double %b) nounwind { ; CHECK-LABEL: fminm_d: ; CHECK: # %bb.0: @@ -194,8 +192,6 @@ define double @fminm_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximum.f64(double, double) - define double @fmaxm_d(double %a, double %b) nounwind { ; CHECK-LABEL: fmaxm_d: ; CHECK: # %bb.0: @@ -216,7 +212,6 @@ define double @fround_d_1(double %a) nounwind { declare double @round(double) nounwind readnone - define double @fround_d_2(double %a) nounwind { ; CHECK-LABEL: fround_d_2: ; CHECK: # %bb.0: @@ -228,7 +223,6 @@ define double @fround_d_2(double %a) nounwind { declare double @floor(double) nounwind readnone - define double @fround_d_3(double %a) nounwind { ; CHECK-LABEL: fround_d_3: ; CHECK: # %bb.0: @@ -240,7 +234,6 @@ define double @fround_d_3(double %a) nounwind { declare double @ceil(double) nounwind readnone - define double @fround_d_4(double %a) nounwind { ; CHECK-LABEL: fround_d_4: ; CHECK: # %bb.0: @@ -252,7 +245,6 @@ define double 
@fround_d_4(double %a) nounwind { declare double @trunc(double) nounwind readnone - define double @fround_d_5(double %a) nounwind { ; CHECK-LABEL: fround_d_5: ; CHECK: # %bb.0: @@ -273,9 +265,6 @@ define double @fround_d_6(double %a) nounwind { ret double %call } -declare double @llvm.roundeven.f64(double) nounwind readnone - - define double @froundnx_d(double %a) nounwind { ; CHECK-LABEL: froundnx_d: ; CHECK: # %bb.0: @@ -287,8 +276,6 @@ define double @froundnx_d(double %a) nounwind { declare double @rint(double) nounwind readnone -declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) - define i32 @fcmp_olt_q(double %a, double %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll index cecdd77a079e4..6f25892fce20f 100644 --- a/llvm/test/CodeGen/RISCV/double_reduct.ll +++ b/llvm/test/CodeGen/RISCV/double_reduct.ll @@ -69,7 +69,6 @@ define float @fmax_f32(<4 x float> %a, <4 x float> %b) { ret float %r } - define i32 @add_i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: add_i32: ; CHECK: # %bb.0: @@ -261,24 +260,3 @@ define i32 @smax_i32(<4 x i32> %a, <4 x i32> %b) { ret i32 %r } -declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) -declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>) -declare i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16>) -declare i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16>) -declare i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32>) -declare i32 
@llvm.vector.reduce.umax.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32>) -declare float @llvm.minnum.f32(float, float) -declare float @llvm.maxnum.f32(float, float) -declare i32 @llvm.umin.i32(i32, i32) -declare i32 @llvm.umax.i32(i32, i32) -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll index 85867a4ab2c6f..d785e4c4ac29e 100644 --- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll @@ -95,16 +95,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv8i16.i64(, ptr nocapture, i64) - -declare @llvm.riscv.vle.nxv8i8.i64(, ptr nocapture, i64) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) - -declare @llvm.riscv.vmsbc.nxv8i16.i16.i64(, i16, i64) - -declare @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(, , , i64, i64 immarg) - -declare target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), , i32) - -declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", , 4), ptr nocapture, i64, i64) diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll index 62dd3fe1e2f30..202451b698c48 100644 --- a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll +++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll @@ -38,4 +38,3 @@ entry: declare void @foo(ptr) -declare ptr @llvm.eh.dwarf.cfa(i32) nounwind diff --git a/llvm/test/CodeGen/RISCV/fixed-csr.ll b/llvm/test/CodeGen/RISCV/fixed-csr.ll index f39085132e4a2..406c2afcc63d9 100644 --- a/llvm/test/CodeGen/RISCV/fixed-csr.ll +++ b/llvm/test/CodeGen/RISCV/fixed-csr.ll @@ -11,8 +11,6 @@ define noundef signext i32 @foo() { ret i32 0 } -declare void 
@llvm.write_register.i64(metadata, i64) - define noundef signext i32 @bar() nounwind { ; CHECK-LABEL: bar: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll index 90ce034eafd3b..6a47c3f3c3926 100644 --- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll @@ -47,7 +47,6 @@ define float @fadd_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) define float @fsub_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fsub_s: @@ -80,7 +79,6 @@ define float @fsub_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) define float @fmul_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fmul_s: @@ -113,7 +111,6 @@ define float @fmul_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) define float @fdiv_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fdiv_s: @@ -146,7 +143,6 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) define float @fsqrt_s(float %a) nounwind strictfp { ; 
CHECKIF-LABEL: fsqrt_s: @@ -179,7 +175,6 @@ define float @fsqrt_s(float %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) define float @fmin_s(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: fmin_s: @@ -238,7 +233,6 @@ define float @fmin_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) strictfp define float @fmax_s(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: fmax_s: @@ -297,7 +291,6 @@ define float @fmax_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) strictfp define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmadd_s: @@ -330,7 +323,6 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) strictfp define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmsub_s: diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll index 95f1fc6899206..af9e996fa2ef1 100644 --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -145,8 +145,6 @@ define float @fdiv_s(float %a, float %b) nounwind { ret float %1 } -declare float 
@llvm.sqrt.f32(float) - define float @fsqrt_s(float %a) nounwind { ; CHECKIF-LABEL: fsqrt_s: ; CHECKIF: # %bb.0: @@ -179,8 +177,6 @@ define float @fsqrt_s(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @fsgnj_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fsgnj_s: ; CHECKIF: # %bb.0: @@ -316,8 +312,6 @@ define float @fsgnjn_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.fabs.f32(float) - define float @fabs_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fabs_s: ; CHECKIF: # %bb.0: @@ -364,8 +358,6 @@ define float @fabs_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.minnum.f32(float, float) - define float @fmin_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmin_s: ; CHECKIF: # %bb.0: @@ -398,8 +390,6 @@ define float @fmin_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @fmax_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmax_s: ; CHECKIF: # %bb.0: @@ -432,8 +422,6 @@ define float @fmax_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; CHECKIF-LABEL: fmadd_s: ; CHECKIF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll index aaeb1b7c0b1fb..ff2eab615a87e 100644 --- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll @@ -56,8 +56,6 @@ define float @fneg(float %a) nounwind { ret float %1 } -declare float @llvm.fabs.f32(float) - define float @fabs(float %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -96,8 +94,6 @@ define float @fabs(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - ; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if floating point isn't supported. 
A combine could be written to ; do the same even when f32 is legal. diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll index 1b25a2b64f4d3..8daaf83c0f3c0 100644 --- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll @@ -51,7 +51,6 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) define i32 @fcvt_wu_s(float %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_wu_s: @@ -84,7 +83,6 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -162,7 +160,6 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) define float @fcvt_s_w_load(ptr %p) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_load: @@ -233,7 +230,6 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata) define float @fcvt_s_wu_load(ptr %p) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_load: @@ -321,7 +317,6 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata) define i64 @fcvt_lu_s(float %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_lu_s: @@ -372,7 +367,6 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata) define float @fcvt_s_l(i64 %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_s_l: @@ -423,7 +417,6 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata) define float @fcvt_s_lu(i64 %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_s_lu: @@ -474,7 +467,6 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", 
metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata) define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_i8: @@ -507,7 +499,6 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata) define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_i8: @@ -540,7 +531,6 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata) define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_i16: @@ -573,7 +563,6 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata) define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_i16: @@ -606,7 +595,6 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata) ; Make sure we select W version of addi on RV64. 
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp { diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index e6e4f6642f685..4a637bf4ae327 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -150,7 +150,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f32(float) define i32 @fcvt_wu_s(float %a) nounwind { ; CHECKIF-LABEL: fcvt_wu_s: @@ -334,7 +333,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f32(float) define signext i32 @fmv_x_w(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmv_x_w: @@ -801,7 +799,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.f32(float) define i64 @fcvt_lu_s(float %a) nounwind { ; RV32IF-LABEL: fcvt_lu_s: @@ -989,7 +986,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f32(float) define float @fcvt_s_l(i64 %a) nounwind { ; RV32IF-LABEL: fcvt_s_l: @@ -1547,7 +1543,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f32(float) define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_i16: @@ -1709,7 +1704,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f32(float) define signext i8 @fcvt_w_s_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_w_s_i8: @@ -1889,7 +1883,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f32(float) define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_i8: @@ -2041,7 +2034,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f32(float) define zeroext i32 
@fcvt_wu_s_sat_zext(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll index 7cdd1826b4522..0334d2556cd9a 100644 --- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll @@ -50,7 +50,6 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fcmp_ogt: @@ -717,7 +716,6 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll index 8b883f781c9d9..3a4acfd8a41ee 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll @@ -18,8 +18,6 @@ ; RUN: -verify-machineinstrs -disable-strictnode-mutation \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) - define float @sqrt_f32(float %a) nounwind strictfp { ; CHECKIF-LABEL: sqrt_f32: ; CHECKIF: # %bb.0: @@ -52,8 +50,6 @@ define float @sqrt_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata) - define float @powi_f32(float %a, i32 %b) nounwind strictfp { ; RV32IF-LABEL: powi_f32: ; RV32IF: # %bb.0: @@ -115,8 +111,6 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata) - define float @sin_f32(float %a) nounwind strictfp { 
; RV32IF-LABEL: sin_f32: ; RV32IF: # %bb.0: @@ -175,8 +169,6 @@ define float @sin_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata) - define float @cos_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: cos_f32: ; RV32IF: # %bb.0: @@ -354,8 +346,6 @@ define float @sincos_f32(float %a) nounwind strictfp { ret float %3 } -declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata) - define float @tan_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: tan_f32: ; RV32IF: # %bb.0: @@ -588,8 +578,6 @@ define float @atan_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata) - define float @atan2_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: atan2_f32: ; RV32IF: # %bb.0: @@ -822,8 +810,6 @@ define float @tanh_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata) - define float @pow_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: pow_f32: ; RV32IF: # %bb.0: @@ -882,8 +868,6 @@ define float @pow_f32(float %a, float %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata) - define float @exp_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: exp_f32: ; RV32IF: # %bb.0: @@ -942,8 +926,6 @@ define float @exp_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata) - define float @exp2_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: exp2_f32: ; RV32IF: # %bb.0: @@ -1002,8 +984,6 @@ define float @exp2_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata) - define float @log_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log_f32: ; RV32IF: # %bb.0: @@ -1062,8 +1042,6 @@ define 
float @log_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata) - define float @log10_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log10_f32: ; RV32IF: # %bb.0: @@ -1122,8 +1100,6 @@ define float @log10_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata) - define float @log2_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log2_f32: ; RV32IF: # %bb.0: @@ -1182,8 +1158,6 @@ define float @log2_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) - define float @fma_f32(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fma_f32: ; CHECKIF: # %bb.0: @@ -1216,8 +1190,6 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata) - define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmuladd_f32: ; CHECKIF: # %bb.0: @@ -1260,8 +1232,6 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) - define float @minnum_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: minnum_f32: ; RV32IF: # %bb.0: @@ -1320,8 +1290,6 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) - define float @maxnum_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: maxnum_f32: ; RV32IF: # %bb.0: @@ -1397,8 +1365,6 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp { ; ret float %1 ; } -declare float @llvm.experimental.constrained.floor.f32(float, metadata) - define float @floor_f32(float %a) nounwind strictfp { 
; RV32IF-LABEL: floor_f32: ; RV32IF: # %bb.0: @@ -1457,8 +1423,6 @@ define float @floor_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.ceil.f32(float, metadata) - define float @ceil_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: ceil_f32: ; RV32IF: # %bb.0: @@ -1517,8 +1481,6 @@ define float @ceil_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) - define float @trunc_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: @@ -1577,8 +1539,6 @@ define float @trunc_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata) - define float @rint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: rint_f32: ; RV32IF: # %bb.0: @@ -1637,8 +1597,6 @@ define float @rint_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata) - define float @nearbyint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: nearbyint_f32: ; RV32IF: # %bb.0: @@ -1697,8 +1655,6 @@ define float @nearbyint_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.round.f32(float, metadata) - define float @round_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: round_f32: ; RV32IF: # %bb.0: @@ -1757,8 +1713,6 @@ define float @round_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.roundeven.f32(float, metadata) - define float @roundeven_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: roundeven_f32: ; RV32IF: # %bb.0: @@ -1817,8 +1771,6 @@ define float @roundeven_f32(float %a) nounwind strictfp { ret float %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f32(float, metadata, metadata) - define iXLen @lrint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: lrint_f32: ; RV32IF: # %bb.0: @@ -1861,8 +1813,6 @@ define 
iXLen @lrint_f32(float %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f32(float, metadata) - define iXLen @lround_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: lround_f32: ; RV32IF: # %bb.0: @@ -1905,8 +1855,6 @@ define iXLen @lround_f32(float %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata) - define i64 @llrint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: llrint_f32: ; RV32IF: # %bb.0: @@ -1957,8 +1905,6 @@ define i64 @llrint_f32(float %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata) - define i64 @llround_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: llround_f32: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index 5f673ac17d569..069e20da7b908 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -22,8 +22,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s -declare float @llvm.sqrt.f32(float) - define float @sqrt_f32(float %a) nounwind { ; RV32IF-LABEL: sqrt_f32: ; RV32IF: # %bb.0: @@ -71,8 +69,6 @@ define float @sqrt_f32(float %a) nounwind { ret float %1 } -declare float @llvm.powi.f32.i32(float, i32) - define float @powi_f32(float %a, i32 %b) nounwind { ; RV32IF-LABEL: powi_f32: ; RV32IF: # %bb.0: @@ -134,8 +130,6 @@ define float @powi_f32(float %a, i32 %b) nounwind { ret float %1 } -declare float @llvm.sin.f32(float) - define float @sin_f32(float %a) nounwind { ; RV32IF-LABEL: sin_f32: ; RV32IF: # %bb.0: @@ -178,8 +172,6 @@ define float @sin_f32(float %a) nounwind { ret float %1 } -declare float @llvm.cos.f32(float) - define float @cos_f32(float %a) nounwind { ; RV32IF-LABEL: cos_f32: ; RV32IF: # %bb.0: @@ -359,8 +351,6 @@ define float @sincos_f32(float %a) 
nounwind { ret float %3 } -declare float @llvm.pow.f32(float, float) - define float @pow_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: pow_f32: ; RV32IF: # %bb.0: @@ -403,8 +393,6 @@ define float @pow_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.exp.f32(float) - define float @exp_f32(float %a) nounwind { ; RV32IF-LABEL: exp_f32: ; RV32IF: # %bb.0: @@ -447,8 +435,6 @@ define float @exp_f32(float %a) nounwind { ret float %1 } -declare float @llvm.exp2.f32(float) - define float @exp2_f32(float %a) nounwind { ; RV32IF-LABEL: exp2_f32: ; RV32IF: # %bb.0: @@ -533,8 +519,6 @@ define float @exp10_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log.f32(float) - define float @log_f32(float %a) nounwind { ; RV32IF-LABEL: log_f32: ; RV32IF: # %bb.0: @@ -577,8 +561,6 @@ define float @log_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log10.f32(float) - define float @log10_f32(float %a) nounwind { ; RV32IF-LABEL: log10_f32: ; RV32IF: # %bb.0: @@ -621,8 +603,6 @@ define float @log10_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log2.f32(float) - define float @log2_f32(float %a) nounwind { ; RV32IF-LABEL: log2_f32: ; RV32IF: # %bb.0: @@ -665,8 +645,6 @@ define float @log2_f32(float %a) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fma_f32: ; RV32IF: # %bb.0: @@ -714,8 +692,6 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ret float %1 } -declare float @llvm.fmuladd.f32(float, float, float) - define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fmuladd_f32: ; RV32IF: # %bb.0: @@ -773,8 +749,6 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ret float %1 } -declare float @llvm.fabs.f32(float) - define float @fabs_f32(float %a) nounwind { ; RV32IF-LABEL: fabs_f32: ; RV32IF: # %bb.0: @@ -816,8 +790,6 @@ define float @fabs_f32(float %a) 
nounwind { ret float %1 } -declare float @llvm.minnum.f32(float, float) - define float @minnum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: minnum_f32: ; RV32IF: # %bb.0: @@ -865,8 +837,6 @@ define float @minnum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @maxnum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: maxnum_f32: ; RV32IF: # %bb.0: @@ -914,8 +884,6 @@ define float @maxnum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @copysign_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: copysign_f32: ; RV32IF: # %bb.0: @@ -963,8 +931,6 @@ define float @copysign_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.floor.f32(float) - define float @floor_f32(float %a) nounwind { ; RV32IF-LABEL: floor_f32: ; RV32IF: # %bb.0: @@ -1055,8 +1021,6 @@ define float @floor_f32(float %a) nounwind { ret float %1 } -declare float @llvm.ceil.f32(float) - define float @ceil_f32(float %a) nounwind { ; RV32IF-LABEL: ceil_f32: ; RV32IF: # %bb.0: @@ -1147,8 +1111,6 @@ define float @ceil_f32(float %a) nounwind { ret float %1 } -declare float @llvm.trunc.f32(float) - define float @trunc_f32(float %a) nounwind { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: @@ -1239,8 +1201,6 @@ define float @trunc_f32(float %a) nounwind { ret float %1 } -declare float @llvm.rint.f32(float) - define float @rint_f32(float %a) nounwind { ; RV32IF-LABEL: rint_f32: ; RV32IF: # %bb.0: @@ -1331,8 +1291,6 @@ define float @rint_f32(float %a) nounwind { ret float %1 } -declare float @llvm.nearbyint.f32(float) - define float @nearbyint_f32(float %a) nounwind { ; RV32IF-LABEL: nearbyint_f32: ; RV32IF: # %bb.0: @@ -1375,8 +1333,6 @@ define float @nearbyint_f32(float %a) nounwind { ret float %1 } -declare float @llvm.round.f32(float) - define float @round_f32(float %a) nounwind { ; RV32IF-LABEL: round_f32: ; RV32IF: # %bb.0: @@ -1467,8 +1423,6 @@ define float 
@round_f32(float %a) nounwind { ret float %1 } -declare float @llvm.roundeven.f32(float) - define float @roundeven_f32(float %a) nounwind { ; RV32IF-LABEL: roundeven_f32: ; RV32IF: # %bb.0: @@ -1559,8 +1513,6 @@ define float @roundeven_f32(float %a) nounwind { ret float %1 } -declare iXLen @llvm.lrint.iXLen.f32(float) - define iXLen @lrint_f32(float %a) nounwind { ; RV32IF-LABEL: lrint_f32: ; RV32IF: # %bb.0: @@ -1608,9 +1560,6 @@ define iXLen @lrint_f32(float %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f32(float) -declare i64 @llvm.lround.i64.f32(float) - define iXLen @lround_f32(float %a) nounwind { ; RV32IF-LABEL: lround_f32: ; RV32IF: # %bb.0: @@ -1707,8 +1656,6 @@ define i32 @lround_i32_f32(float %a) nounwind { ret i32 %1 } -declare i64 @llvm.llrint.i64.f32(float) - define i64 @llrint_f32(float %a) nounwind { ; RV32IF-LABEL: llrint_f32: ; RV32IF: # %bb.0: @@ -1764,8 +1711,6 @@ define i64 @llrint_f32(float %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f32(float) - define i64 @llround_f32(float %a) nounwind { ; RV32IF-LABEL: llround_f32: ; RV32IF: # %bb.0: @@ -1821,7 +1766,6 @@ define i64 @llround_f32(float %a) nounwind { ret i64 %1 } -declare i1 @llvm.is.fpclass.f32(float, i32) define i1 @fpclass(float %x) { ; RV32IF-LABEL: fpclass: ; RV32IF: # %bb.0: @@ -2505,8 +2449,6 @@ define float @tan_f32(float %a) nounwind { ret float %1 } -declare float @llvm.maximumnum.f32(float, float) - define float @maximumnum_float(float %x, float %y) { ; RV32IF-LABEL: maximumnum_float: ; RV32IF: # %bb.0: @@ -2562,8 +2504,6 @@ define float @maximumnum_float(float %x, float %y) { ret float %z } -declare float @llvm.minimumnum.f32(float, float) - define float @minimumnum_float(float %x, float %y) { ; RV32IF-LABEL: minimumnum_float: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll index 2e9f8cbf6d2ef..806200c3f0b8e 100644 --- 
a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare float @llvm.minimum.f32(float, float) - define float @fminimum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: fminimum_f32: ; RV32IF: # %bb.0: @@ -124,8 +122,6 @@ define float @fminimum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximum.f32(float, float) - define float @fmaximum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: fmaximum_f32: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll index 6871f29cb8b05..33fc51363cb56 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll @@ -1568,13 +1568,3 @@ define i64 @test_rint_ui64(float %x) nounwind { ret i64 %b } -declare float @llvm.floor.f32(float) -declare float @llvm.ceil.f32(float) -declare float @llvm.trunc.f32(float) -declare float @llvm.round.f32(float) -declare float @llvm.roundeven.f32(float) -declare float @llvm.rint.f32(float) -declare i32 @llvm.fptosi.sat.i32.f32(float) -declare i64 @llvm.fptosi.sat.i64.f32(float) -declare i32 @llvm.fptoui.sat.i32.f32(float) -declare i64 @llvm.fptoui.sat.i64.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll index 837ff766b430f..f71eaec7ab8c7 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll @@ -1753,8 +1753,3 @@ define float @test_roundeven_float(float %x) { ret float %a } -declare float @llvm.floor.f32(float) -declare float @llvm.ceil.f32(float) -declare float @llvm.trunc.f32(float) -declare float @llvm.round.f32(float) -declare float @llvm.roundeven.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-select-verify.ll 
b/llvm/test/CodeGen/RISCV/float-select-verify.ll index 2d5d6d7cb4825..bebbf2fae5226 100644 --- a/llvm/test/CodeGen/RISCV/float-select-verify.ll +++ b/llvm/test/CodeGen/RISCV/float-select-verify.ll @@ -87,4 +87,3 @@ declare void @foo(i64) declare void @bar(float) -declare float @llvm.round.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-zfa.ll b/llvm/test/CodeGen/RISCV/float-zfa.ll index aec5ac75a9795..7be0d998f38c3 100644 --- a/llvm/test/CodeGen/RISCV/float-zfa.ll +++ b/llvm/test/CodeGen/RISCV/float-zfa.ll @@ -116,8 +116,6 @@ define float @loadfpimm13() { ret float 0xb810000000000000 } -declare float @llvm.minimum.f32(float, float) - define float @fminm_s(float %a, float %b) nounwind { ; CHECK-LABEL: fminm_s: ; CHECK: # %bb.0: @@ -127,8 +125,6 @@ define float @fminm_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximum.f32(float, float) - define float @fmaxm_s(float %a, float %b) nounwind { ; CHECK-LABEL: fmaxm_s: ; CHECK: # %bb.0: @@ -138,7 +134,6 @@ define float @fmaxm_s(float %a, float %b) nounwind { ret float %1 } - define float @fround_s_1(float %a) nounwind { ; CHECK-LABEL: fround_s_1: ; CHECK: # %bb.0: @@ -150,7 +145,6 @@ define float @fround_s_1(float %a) nounwind { declare float @roundf(float) nounwind readnone - define float @fround_s_2(float %a) nounwind { ; CHECK-LABEL: fround_s_2: ; CHECK: # %bb.0: @@ -162,7 +156,6 @@ define float @fround_s_2(float %a) nounwind { declare float @floorf(float) nounwind readnone - define float @fround_s_3(float %a) nounwind { ; CHECK-LABEL: fround_s_3: ; CHECK: # %bb.0: @@ -174,7 +167,6 @@ define float @fround_s_3(float %a) nounwind { declare float @ceilf(float) nounwind readnone - define float @fround_s_4(float %a) nounwind { ; CHECK-LABEL: fround_s_4: ; CHECK: # %bb.0: @@ -186,7 +178,6 @@ define float @fround_s_4(float %a) nounwind { declare float @truncf(float) nounwind readnone - define float @fround_s_5(float %a) nounwind { ; CHECK-LABEL: fround_s_5: ; CHECK: # %bb.0: @@ -207,9 +198,6 @@ 
define float @fround_s_6(float %a) nounwind { ret float %call } -declare float @llvm.roundeven.f32(float) nounwind readnone - - define float @froundnx_s(float %a) nounwind { ; CHECK-LABEL: froundnx_s: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define float @froundnx_s(float %a) nounwind { declare float @rintf(float) nounwind readnone -declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) - define i32 @fcmp_olt_q(float %a, float %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/flt-rounds.ll b/llvm/test/CodeGen/RISCV/flt-rounds.ll index 4456c36cfb5de..df72a08117a5d 100644 --- a/llvm/test/CodeGen/RISCV/flt-rounds.ll +++ b/llvm/test/CodeGen/RISCV/flt-rounds.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare i32 @llvm.get.rounding() - define i32 @test_flt_rounds() nounwind { ; RV32I-LABEL: test_flt_rounds: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/fmax-fmin.ll b/llvm/test/CodeGen/RISCV/fmax-fmin.ll index 9d5729802a0ff..3b5010551005a 100644 --- a/llvm/test/CodeGen/RISCV/fmax-fmin.ll +++ b/llvm/test/CodeGen/RISCV/fmax-fmin.ll @@ -300,7 +300,3 @@ define double @minnum_f64_fast(double %x, double %y) nounwind { ret double %r } -declare float @llvm.maxnum.f32(float, float) -declare double @llvm.maxnum.f64(double, double) -declare float @llvm.minnum.f32(float, float) -declare double @llvm.minnum.f64(double, double) diff --git a/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll b/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll index e9b771a0698de..1f55a474484eb 100644 --- a/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll +++ b/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll @@ -5,10 +5,6 @@ ; RUN: llc --mtriple=riscv32 --mattr=+d,+zfh < %s | FileCheck %s --check-prefixes=CHECK,CHECK-FP16-RV32 ; RUN: llc --mtriple=riscv32 --mattr=+d,-zfh < %s | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16-RV32 -declare half 
@llvm.fcanonicalize.f16(half) -declare float @llvm.fcanonicalize.f32(float) -declare double @llvm.fcanonicalize.f64(double) - define half @fcanonicalize_f16(half %x) { ; CHECK-FP16-RV64-LABEL: fcanonicalize_f16: ; CHECK-FP16-RV64: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll index a0d1ecce74e04..a724556e553d5 100644 --- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -1987,9 +1987,6 @@ entry: ret i64 %conv6 } - - - ; i32 saturate define i32 @stest_f64i32_mm(double %x) { @@ -3875,12 +3872,3 @@ define i32 @ustest_f16i32_nsat(half %x) { ret i32 %spec.store.select7 } -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) -declare i64 @llvm.smax.i64(i64, i64) -declare i64 @llvm.umin.i64(i64, i64) -declare i128 @llvm.smin.i128(i128, i128) -declare i128 @llvm.smax.i128(i128, i128) -declare i128 @llvm.umin.i128(i128, i128) diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll index b4a1400dbd547..d241f114716ab 100644 --- a/llvm/test/CodeGen/RISCV/fpenv.ll +++ b/llvm/test/CodeGen/RISCV/fpenv.ll @@ -214,7 +214,5 @@ define void @func_07() { attributes #0 = { strictfp } -declare void @llvm.set.rounding(i32) -declare i32 @llvm.get.rounding() declare i32 @fesetround(i32 noundef) diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll index 10d542496e0f7..799d337e7a452 100644 --- a/llvm/test/CodeGen/RISCV/frame.ll +++ b/llvm/test/CodeGen/RISCV/frame.ll @@ -48,6 +48,4 @@ define i32 @test() nounwind { ret i32 0 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) - declare void @test1(ptr) diff --git a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll index 478c8457997ae..fdc650e8819a2 100644 --- a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll +++ 
b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll @@ -5,8 +5,6 @@ ; RUN: | FileCheck -check-prefix=RV64I %s declare void @notdead(ptr) -declare ptr @llvm.frameaddress(i32) -declare ptr @llvm.returnaddress(i32) define ptr @test_frameaddress_0() nounwind { ; RV32I-LABEL: test_frameaddress_0: diff --git a/llvm/test/CodeGen/RISCV/frm-dependency.ll b/llvm/test/CodeGen/RISCV/frm-dependency.ll index a596c34ef9123..a2abd46267319 100644 --- a/llvm/test/CodeGen/RISCV/frm-dependency.ll +++ b/llvm/test/CodeGen/RISCV/frm-dependency.ll @@ -30,8 +30,6 @@ define float @fadd_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: name: fmadd_s ; RV32IF: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/get-register-invalid.ll b/llvm/test/CodeGen/RISCV/get-register-invalid.ll index a86de3e8868f4..48e2c5522f2ec 100644 --- a/llvm/test/CodeGen/RISCV/get-register-invalid.ll +++ b/llvm/test/CodeGen/RISCV/get-register-invalid.ll @@ -7,6 +7,4 @@ entry: ret i32 %reg } -declare i32 @llvm.read_register.i32(metadata) nounwind - !0 = !{!"notareg\00"} diff --git a/llvm/test/CodeGen/RISCV/get-register-noreserve.ll b/llvm/test/CodeGen/RISCV/get-register-noreserve.ll index 211ee0ea602b3..99248a9e3a798 100644 --- a/llvm/test/CodeGen/RISCV/get-register-noreserve.ll +++ b/llvm/test/CodeGen/RISCV/get-register-noreserve.ll @@ -41,10 +41,6 @@ entry: ret i32 %sp } - -declare i32 @llvm.read_register.i32(metadata) nounwind -declare void @llvm.write_register.i32(metadata, i32) nounwind - !0 = !{!"sp\00"} !1 = !{!"x4\00"} !2 = !{!"vlenb"} diff --git a/llvm/test/CodeGen/RISCV/get-register-reserve.ll b/llvm/test/CodeGen/RISCV/get-register-reserve.ll index 7549b4dd3f682..cce36240d5681 100644 --- a/llvm/test/CodeGen/RISCV/get-register-reserve.ll +++ b/llvm/test/CodeGen/RISCV/get-register-reserve.ll @@ -28,7 +28,5 @@ entry: ret i32 %fp } -declare i32 @llvm.read_register.i32(metadata) 
nounwind - !0 = !{!"a1\00"} !1 = !{!"fp\00"} diff --git a/llvm/test/CodeGen/RISCV/half-arith-strict.ll b/llvm/test/CodeGen/RISCV/half-arith-strict.ll index 74e7f8bdc565f..91e70145c316c 100644 --- a/llvm/test/CodeGen/RISCV/half-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-arith-strict.ll @@ -54,7 +54,6 @@ define half @fadd_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata) define half @fsub_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fsub_h: @@ -85,7 +84,6 @@ define half @fsub_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata) define half @fmul_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fmul_h: @@ -116,7 +114,6 @@ define half @fmul_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata) define half @fdiv_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fdiv_h: @@ -147,7 +144,6 @@ define half @fdiv_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata) define half @fsqrt_h(half %a) nounwind strictfp { ; CHECK-LABEL: fsqrt_h: @@ -176,7 +172,6 @@ define half @fsqrt_h(half %a) nounwind strictfp { %1 = call half 
@llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) ; FIXME: fminnum/fmaxnum need libcalls to handle SNaN, but we don't have f16 ; libcalls and don't support promotion yet. @@ -223,7 +218,6 @@ define half @fmadd_h(half %a, half %b, half %c) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) strictfp define half @fmsub_h(half %a, half %b, half %c) nounwind strictfp { ; CHECK-LABEL: fmsub_h: diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll index d089e3678756c..e1eb860d26591 100644 --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -353,8 +353,6 @@ define half @fdiv_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.sqrt.f16(half) - define half @fsqrt_h(half %a) nounwind { ; CHECKIZFH-LABEL: fsqrt_h: ; CHECKIZFH: # %bb.0: @@ -409,8 +407,6 @@ define half @fsqrt_h(half %a) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - define half @fsgnj_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fsgnj_h: ; CHECKIZFH: # %bb.0: @@ -760,8 +756,6 @@ define half @fsgnjn_h(half %a, half %b) nounwind { ret half %3 } -declare half @llvm.fabs.f16(half) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. 
define half @fabs_h(half %a, half %b) nounwind { @@ -916,8 +910,6 @@ define half @fabs_h(half %a, half %b) nounwind { ret half %3 } -declare half @llvm.minnum.f16(half, half) - define half @fmin_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmin_h: ; CHECKIZFH: # %bb.0: @@ -1000,8 +992,6 @@ define half @fmin_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maxnum.f16(half, half) - define half @fmax_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmax_h: ; CHECKIZFH: # %bb.0: @@ -1084,8 +1074,6 @@ define half @fmax_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.fma.f16(half, half, half) - define half @fmadd_h(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fmadd_h: ; CHECKIZFH: # %bb.0: @@ -1762,7 +1750,6 @@ define half @fnmadd_h_3(half %a, half %b, half %c) nounwind { ret half %neg } - define half @fnmadd_nsz(half %a, half %b, half %c) nounwind { ; RV32IZFH-LABEL: fnmadd_nsz: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll index 730bde5af610b..cfe22b7d4f3b0 100644 --- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll @@ -86,8 +86,6 @@ define half @fneg(half %a) nounwind { ret half %1 } -declare half @llvm.fabs.f16(half) - define half @fabs(half %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -154,8 +152,6 @@ define half @fabs(half %a) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - ; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if half precision floating point isn't supported. A combine could ; be written to do the same even when f16 is legal. 
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll index a607893a3735b..daeb75c31d614 100644 --- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll @@ -120,7 +120,6 @@ define i16 @fcvt_si_h(half %a) nounwind strictfp { %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") ret i16 %1 } -declare i16 @llvm.experimental.constrained.fptosi.i16.f16(half, metadata) define i16 @fcvt_ui_h(half %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_ui_h: @@ -183,7 +182,6 @@ define i16 @fcvt_ui_h(half %a) nounwind strictfp { %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") ret i16 %1 } -declare i16 @llvm.experimental.constrained.fptoui.i16.f16(half, metadata) define i32 @fcvt_w_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_w_h: @@ -246,7 +244,6 @@ define i32 @fcvt_w_h(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define i32 @fcvt_wu_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_wu_h: @@ -309,7 +306,6 @@ define i32 @fcvt_wu_h(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -475,7 +471,6 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata) define i64 @fcvt_lu_h(half %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_lu_h: @@ -552,7 +547,6 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata) define half @fcvt_h_si(i16 %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_h_si: @@ -633,7 +627,6 @@ define half @fcvt_h_si(i16 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i16(i16, metadata, metadata) define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_si_signext: @@ -776,7 +769,6 @@ define half @fcvt_h_ui(i16 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i16(i16, metadata, metadata) define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_ui_zeroext: @@ -901,7 +893,6 @@ define half @fcvt_h_w(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata) define half @fcvt_h_w_load(ptr %p) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_w_load: @@ -1036,7 +1027,6 @@ define half @fcvt_h_wu(i32 %a) nounwind strictfp { %1 = call half 
@llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata) define half @fcvt_h_wu_load(ptr %p) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_wu_load: @@ -1185,7 +1175,6 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata) define half @fcvt_h_lu(i64 %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_h_lu: @@ -1262,7 +1251,6 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata) define half @fcvt_h_s(float %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_s: @@ -1320,7 +1308,6 @@ define half @fcvt_h_s(float %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) define float @fcvt_s_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_s_h: @@ -1378,7 +1365,6 @@ define float @fcvt_s_h(half %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fpext.f32.f16(half %a, metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata) define half @fcvt_h_d(double %a) nounwind strictfp { ; RV32IZFH-LABEL: fcvt_h_d: @@ -1488,7 +1474,6 @@ define half @fcvt_h_d(double %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret 
half %1 } -declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata) define double @fcvt_d_h(half %a) nounwind strictfp { ; RV32IZFH-LABEL: fcvt_d_h: @@ -1607,7 +1592,6 @@ define double @fcvt_d_h(half %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fpext.f64.f16(half %a, metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata) ; Make sure we select W version of addi on RV64. define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp { @@ -2088,4 +2072,3 @@ define fp128 @fcvt_q_h(half %a) nounwind strictfp { %1 = call fp128 @llvm.experimental.constrained.fpext.f128.f16(half %a, metadata !"fpexcept.strict") ret fp128 %1 } -declare fp128 @llvm.experimental.constrained.fpext.f128.f16(half, metadata) diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index c3c06e192f76f..cdf07fc5b1ee7 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -570,7 +570,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f16(half %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f16(half) define i16 @fcvt_ui_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_ui_h: @@ -1016,7 +1015,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f16(half) define i32 @fcvt_w_h(half %a) nounwind { ; CHECKIZFH-LABEL: fcvt_w_h: @@ -1406,7 +1404,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f16(half) define i32 @fcvt_wu_h(half %a) nounwind { ; CHECKIZFH-LABEL: fcvt_wu_h: @@ -1990,7 +1987,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f16(half) define i64 @fcvt_l_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_l_h: @@ -2753,7 +2749,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %0 
} -declare i64 @llvm.fptosi.sat.i64.f16(half) define i64 @fcvt_lu_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_lu_h: @@ -3348,7 +3343,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f16(half) define half @fcvt_h_si(i16 %a) nounwind { ; RV32IZFH-LABEL: fcvt_h_si: @@ -7684,7 +7678,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f16(half %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f16(half) define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_s_i8: @@ -8110,7 +8103,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f16(half %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f16(half) define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_h_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll index 12cf088e3205f..88f0b9db650ae 100644 --- a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll @@ -48,7 +48,6 @@ define i32 @fcmp_oeq(half %a, half %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata) define i32 @fcmp_ogt(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmp_ogt: @@ -653,7 +652,6 @@ define i32 @fcmps_oeq(half %a, half %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata) define i32 @fcmps_ogt(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll index 847054d96968a..e712bd919b0b1 100644 --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -56,8 +56,6 @@ ; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s -declare half 
@llvm.sqrt.f16(half) - define half @sqrt_f16(half %a) nounwind { ; CHECKIZFH-LABEL: sqrt_f16: ; CHECKIZFH: # %bb.0: @@ -112,8 +110,6 @@ define half @sqrt_f16(half %a) nounwind { ret half %1 } -declare half @llvm.powi.f16.i32(half, i32) - define half @powi_f16(half %a, i32 %b) nounwind { ; RV32IZFH-LABEL: powi_f16: ; RV32IZFH: # %bb.0: @@ -244,8 +240,6 @@ define half @powi_f16(half %a, i32 %b) nounwind { ret half %1 } -declare half @llvm.sin.f16(half) - define half @sin_f16(half %a) nounwind { ; RV32IZFH-LABEL: sin_f16: ; RV32IZFH: # %bb.0: @@ -364,8 +358,6 @@ define half @sin_f16(half %a) nounwind { ret half %1 } -declare half @llvm.cos.f16(half) - define half @cos_f16(half %a) nounwind { ; RV32IZFH-LABEL: cos_f16: ; RV32IZFH: # %bb.0: @@ -819,8 +811,6 @@ define half @sincos_f16(half %a) nounwind { ret half %3 } -declare half @llvm.pow.f16(half, half) - define half @pow_f16(half %a, half %b) nounwind { ; RV32IZFH-LABEL: pow_f16: ; RV32IZFH: # %bb.0: @@ -973,8 +963,6 @@ define half @pow_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.exp.f16(half) - define half @exp_f16(half %a) nounwind { ; RV32IZFH-LABEL: exp_f16: ; RV32IZFH: # %bb.0: @@ -1093,8 +1081,6 @@ define half @exp_f16(half %a) nounwind { ret half %1 } -declare half @llvm.exp2.f16(half) - define half @exp2_f16(half %a) nounwind { ; RV32IZFH-LABEL: exp2_f16: ; RV32IZFH: # %bb.0: @@ -1331,8 +1317,6 @@ define half @exp10_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log.f16(half) - define half @log_f16(half %a) nounwind { ; RV32IZFH-LABEL: log_f16: ; RV32IZFH: # %bb.0: @@ -1451,8 +1435,6 @@ define half @log_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log10.f16(half) - define half @log10_f16(half %a) nounwind { ; RV32IZFH-LABEL: log10_f16: ; RV32IZFH: # %bb.0: @@ -1571,8 +1553,6 @@ define half @log10_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log2.f16(half) - define half @log2_f16(half %a) nounwind { ; RV32IZFH-LABEL: log2_f16: ; RV32IZFH: # %bb.0: @@ 
-1691,8 +1671,6 @@ define half @log2_f16(half %a) nounwind { ret half %1 } -declare half @llvm.fma.f16(half, half, half) - define half @fma_f16(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fma_f16: ; CHECKIZFH: # %bb.0: @@ -1791,8 +1769,6 @@ define half @fma_f16(half %a, half %b, half %c) nounwind { ret half %1 } -declare half @llvm.fmuladd.f16(half, half, half) - define half @fmuladd_f16(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fmuladd_f16: ; CHECKIZFH: # %bb.0: @@ -1907,8 +1883,6 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind { ret half %1 } -declare half @llvm.fabs.f16(half) - define half @fabs_f16(half %a) nounwind { ; CHECKIZFH-LABEL: fabs_f16: ; CHECKIZFH: # %bb.0: @@ -1967,8 +1941,6 @@ define half @fabs_f16(half %a) nounwind { ret half %1 } -declare half @llvm.minnum.f16(half, half) - define half @minnum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: minnum_f16: ; CHECKIZFH: # %bb.0: @@ -2051,8 +2023,6 @@ define half @minnum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maxnum.f16(half, half) - define half @maxnum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: maxnum_f16: ; CHECKIZFH: # %bb.0: @@ -2135,8 +2105,6 @@ define half @maxnum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - define half @copysign_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: copysign_f16: ; CHECKIZFH: # %bb.0: @@ -2217,8 +2185,6 @@ define half @copysign_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.floor.f16(half) - define half @floor_f16(half %a) nounwind { ; CHECKIZFH-LABEL: floor_f16: ; CHECKIZFH: # %bb.0: @@ -2309,8 +2275,6 @@ define half @floor_f16(half %a) nounwind { ret half %1 } -declare half @llvm.ceil.f16(half) - define half @ceil_f16(half %a) nounwind { ; CHECKIZFH-LABEL: ceil_f16: ; CHECKIZFH: # %bb.0: @@ -2401,8 +2365,6 @@ define half @ceil_f16(half %a) nounwind { ret half %1 } -declare half @llvm.trunc.f16(half) - define 
half @trunc_f16(half %a) nounwind { ; CHECKIZFH-LABEL: trunc_f16: ; CHECKIZFH: # %bb.0: @@ -2493,8 +2455,6 @@ define half @trunc_f16(half %a) nounwind { ret half %1 } -declare half @llvm.rint.f16(half) - define half @rint_f16(half %a) nounwind { ; CHECKIZFH-LABEL: rint_f16: ; CHECKIZFH: # %bb.0: @@ -2585,8 +2545,6 @@ define half @rint_f16(half %a) nounwind { ret half %1 } -declare half @llvm.nearbyint.f16(half) - define half @nearbyint_f16(half %a) nounwind { ; RV32IZFH-LABEL: nearbyint_f16: ; RV32IZFH: # %bb.0: @@ -2705,8 +2663,6 @@ define half @nearbyint_f16(half %a) nounwind { ret half %1 } -declare half @llvm.round.f16(half) - define half @round_f16(half %a) nounwind { ; CHECKIZFH-LABEL: round_f16: ; CHECKIZFH: # %bb.0: @@ -2797,8 +2753,6 @@ define half @round_f16(half %a) nounwind { ret half %1 } -declare half @llvm.roundeven.f16(half) - define half @roundeven_f16(half %a) nounwind { ; CHECKIZFH-LABEL: roundeven_f16: ; CHECKIZFH: # %bb.0: @@ -2889,7 +2843,6 @@ define half @roundeven_f16(half %a) nounwind { ret half %1 } -declare i1 @llvm.is.fpclass.f16(half, i32) define i1 @isnan_d_fpclass(half %x) { ; CHECKIZFH-LABEL: isnan_d_fpclass: ; CHECKIZFH: # %bb.0: @@ -2966,8 +2919,6 @@ define i1 @isnan_d_fpclass(half %x) { ret i1 %1 } -declare half @llvm.tan.f16(half) - define half @tan_f16(half %a) nounwind { ; RV32IZFH-LABEL: tan_f16: ; RV32IZFH: # %bb.0: @@ -3086,8 +3037,6 @@ define half @tan_f16(half %a) nounwind { ret half %1 } -declare half @llvm.maximumnum.f16(half, half) - define half @maximumnum_half(half %x, half %y) { ; CHECKIZFH-LABEL: maximumnum_half: ; CHECKIZFH: # %bb.0: @@ -3190,8 +3139,6 @@ define half @maximumnum_half(half %x, half %y) { ret half %z } -declare half @llvm.minimumnum.f16(half, half) - define half @minimumnum_half(half %x, half %y) { ; CHECKIZFH-LABEL: minimumnum_half: ; CHECKIZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll index bc3f44363fb95..23ca1992614a1 
100644 --- a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefixes=CHECKIZHINX %s -declare half @llvm.minimum.f16(half, half) - define half @fminimum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fminimum_f16: ; CHECKIZFH: # %bb.0: @@ -56,8 +54,6 @@ define half @fminimum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maximum.f16(half, half) - define half @fmaximum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmaximum_f16: ; CHECKIZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll index c815bc19e280c..67f69120b9aea 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -4300,13 +4300,3 @@ define i64 @test_rint_ui64(half %x) nounwind { ret i64 %b } -declare half @llvm.floor.f16(half) -declare half @llvm.ceil.f16(half) -declare half @llvm.trunc.f16(half) -declare half @llvm.round.f16(half) -declare half @llvm.roundeven.f16(half) -declare half @llvm.rint.f16(half) -declare i32 @llvm.fptosi.sat.i32.f16(half) -declare i64 @llvm.fptosi.sat.i64.f16(half) -declare i32 @llvm.fptoui.sat.i32.f16(half) -declare i64 @llvm.fptoui.sat.i64.f16(half) diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll index cfc997d66ec56..b80a4cd13eb8a 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll @@ -4901,8 +4901,3 @@ define half @test_roundeven_half(half %x) { ret half %a } -declare half @llvm.floor.f16(half) -declare half @llvm.ceil.f16(half) -declare half @llvm.trunc.f16(half) -declare half @llvm.round.f16(half) -declare half @llvm.roundeven.f16(half) diff --git a/llvm/test/CodeGen/RISCV/half-zfa.ll b/llvm/test/CodeGen/RISCV/half-zfa.ll index 90c66e7fe2ca4..9a5b9137466a2 100644 
--- a/llvm/test/CodeGen/RISCV/half-zfa.ll +++ b/llvm/test/CodeGen/RISCV/half-zfa.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -target-abi lp64f -mattr=+zfa,+zfhmin < %s \ ; RUN: | FileCheck %s --check-prefix=ZFHMIN -declare half @llvm.minimum.f16(half, half) - define half @fminm_h(half %a, half %b) nounwind { ; CHECK-LABEL: fminm_h: ; CHECK: # %bb.0: @@ -27,8 +25,6 @@ define half @fminm_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maximum.f16(half, half) - define half @fmaxm_h(half %a, half %b) nounwind { ; CHECK-LABEL: fmaxm_h: ; CHECK: # %bb.0: @@ -62,9 +58,6 @@ define half @fround_h_1(half %a) nounwind { ret half %call } -declare half @llvm.round.f16(half) nounwind readnone - - define half @fround_h_2(half %a) nounwind { ; CHECK-LABEL: fround_h_2: ; CHECK: # %bb.0: @@ -81,9 +74,6 @@ define half @fround_h_2(half %a) nounwind { ret half %call } -declare half @llvm.floor.f16(half) nounwind readnone - - define half @fround_h_3(half %a) nounwind { ; CHECK-LABEL: fround_h_3: ; CHECK: # %bb.0: @@ -100,9 +90,6 @@ define half @fround_h_3(half %a) nounwind { ret half %call } -declare half @llvm.ceil.f16(half) nounwind readnone - - define half @fround_h_4(half %a) nounwind { ; CHECK-LABEL: fround_h_4: ; CHECK: # %bb.0: @@ -119,9 +106,6 @@ define half @fround_h_4(half %a) nounwind { ret half %call } -declare half @llvm.trunc.f16(half) nounwind readnone - - define half @fround_h_5(half %a) nounwind { ; CHECK-LABEL: fround_h_5: ; CHECK: # %bb.0: @@ -138,8 +122,6 @@ define half @fround_h_5(half %a) nounwind { ret half %call } -declare half @llvm.nearbyint.f16(half) nounwind readnone - define half @fround_h_6(half %a) nounwind { ; CHECK-LABEL: fround_h_6: ; CHECK: # %bb.0: @@ -156,9 +138,6 @@ define half @fround_h_6(half %a) nounwind { ret half %call } -declare half @llvm.roundeven.f16(half) nounwind readnone - - define half @froundnx_h(half %a) nounwind { ; CHECK-LABEL: froundnx_h: ; CHECK: # %bb.0: @@ -175,10 +154,6 @@ define half @froundnx_h(half %a) 
nounwind { ret half %call } -declare half @llvm.rint.f16(half) nounwind readnone - -declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata) - define i32 @fcmp_olt_q(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll index cf780f4f7f76c..5c4c2a7f7dd0c 100644 --- a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll +++ b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll @@ -36,8 +36,6 @@ define ptr @f2(ptr %x0, ptr %x1) { ret ptr %x0 } -declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32) - ; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x10_2_short,comdat ; CHECK-NEXT: .type __hwasan_check_x10_2_short,@function ; CHECK-NEXT: .weak __hwasan_check_x10_2_short diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll index 2742b9a3655d3..8e079450d9709 100644 --- a/llvm/test/CodeGen/RISCV/i64-icmp.ll +++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll @@ -828,4 +828,3 @@ define i64 @mask_test_eq_multiuse(i64 %x, ptr %p) nounwind { ret i64 %ext } -declare i64 @llvm.umin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll index c157c63722cb4..35ff8bece9b5d 100644 --- a/llvm/test/CodeGen/RISCV/iabs.ll +++ b/llvm/test/CodeGen/RISCV/iabs.ll @@ -8,12 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i8 @llvm.abs.i8(i8, i1 immarg) -declare i16 @llvm.abs.i16(i16, i1 immarg) -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) -declare i128 @llvm.abs.i128(i128, i1 immarg) - define i8 @abs8(i8 %x) { ; RV32I-LABEL: abs8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll index a06c7505d543d..e0aed2d4f90ff 100644 --- 
a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll +++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll @@ -234,10 +234,4 @@ define i16 @ctz_v8i1_i16_ret(<8 x i1> %a) { ret i16 %res } -declare i64 @llvm.experimental.cttz.elts.i64.nxv8i16(<vscale x 8 x i16>, i1) -declare i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1>, i1) -declare i32 @llvm.experimental.cttz.elts.i32.nxv4i32(<vscale x 4 x i32>, i1) -declare i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1>, i1) -declare i16 @llvm.experimental.cttz.elts.i16.v16i1(<8 x i1>, i1) - attributes #0 = { vscale_range(2,1024) } diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll index 1216d3000e8c8..632c9a5a75911 100644 --- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll +++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll @@ -60,5 +60,3 @@ define i32 @ctz_v2i1_poison(<2 x i1> %a) { ret i32 %res } -declare i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1>, i1) -declare i16 @llvm.experimental.cttz.elts.i16.v4i32(<4 x i32>, i1) diff --git a/llvm/test/CodeGen/RISCV/intrinsics/trap.ll b/llvm/test/CodeGen/RISCV/intrinsics/trap.ll index e85073518ab9c..1a16a30642472 100644 --- a/llvm/test/CodeGen/RISCV/intrinsics/trap.ll +++ b/llvm/test/CodeGen/RISCV/intrinsics/trap.ll @@ -6,9 +6,6 @@ ; Verify that we lower @llvm.trap() and @llvm.debugtrap() correctly.
-declare void @llvm.trap() -declare void @llvm.debugtrap() - define void @test_trap() nounwind { ; RV32I-LABEL: test_trap: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll index 541fb37742570..34a58832d912c 100644 --- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll +++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll @@ -113,8 +113,6 @@ define i64 @mul64(i64 %a, i64 %b) nounwind { ; Half libcalls: -declare half @llvm.sin.f16(half) - define half @sin_f16(half %a) nounwind { ; RV32IFD-ILP32D-LABEL: sin_f16: ; RV32IFD-ILP32D: # %bb.0: @@ -233,8 +231,6 @@ define half @sin_f16(half %a) nounwind { ; Float libcalls: -declare float @llvm.sin.f32(float) - define float @sin_f32(float %a) nounwind { ; F-ABI-ALL-LABEL: sin_f32: ; F-ABI-ALL: # %bb.0: @@ -265,8 +261,6 @@ define float @sin_f32(float %a) nounwind { ret float %1 } -declare float @llvm.powi.f32.i32(float, i32) - define float @powi_f32(float %a, i32 %b) nounwind { ; RV32IFD-ILP32D-LABEL: powi_f32: ; RV32IFD-ILP32D: # %bb.0: @@ -322,8 +316,6 @@ define float @powi_f32(float %a, i32 %b) nounwind { ret float %1 } -declare i64 @llvm.llround.i64.f32(float) - define i64 @llround_f32(float %a) nounwind { ; RV32-ALL-LABEL: llround_f32: ; RV32-ALL: # %bb.0: @@ -364,8 +356,6 @@ define i64 @llround_f32(float %a) nounwind { ; Double libcalls: -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; D-ABI-ALL-LABEL: sin_f64: ; D-ABI-ALL: # %bb.0: @@ -414,8 +404,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.powi.f64.i32(double, i32) - define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-ILP32D-LABEL: powi_f64: ; RV32IFD-ILP32D: # %bb.0: @@ -472,8 +460,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret double %1 } -declare i64 @llvm.llround.i64.f64(double) - define i64 @llround_f64(double %a) nounwind { ; RV32-ALL-LABEL: llround_f64: ; RV32-ALL: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir index 6da655ced8125..b8d69625e6a95 100644 --- a/llvm/test/CodeGen/RISCV/live-sp.mir +++ b/llvm/test/CodeGen/RISCV/live-sp.mir @@ -17,11 +17,6 @@ ret void } - ; Function Attrs: nofree nosync nounwind readnone willreturn - declare ptr @llvm.returnaddress(i32 immarg) #0 - - attributes #0 = { nofree nosync nounwind readnone willreturn } - ... --- name: test1 diff --git a/llvm/test/CodeGen/RISCV/llvm.exp10.ll b/llvm/test/CodeGen/RISCV/llvm.exp10.ll index 7b199504837e8..5a4ce01e2f351 100644 --- a/llvm/test/CodeGen/RISCV/llvm.exp10.ll +++ b/llvm/test/CodeGen/RISCV/llvm.exp10.ll @@ -6,19 +6,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d < %s \ ; RUN: | FileCheck -check-prefixes=CHECK,RV64IFD %s -declare <1 x half> @llvm.exp10.v1f16(<1 x half>) -declare <2 x half> @llvm.exp10.v2f16(<2 x half>) -declare <3 x half> @llvm.exp10.v3f16(<3 x half>) -declare <4 x half> @llvm.exp10.v4f16(<4 x half>) -declare <1 x float> @llvm.exp10.v1f32(<1 x float>) -declare <2 x float> @llvm.exp10.v2f32(<2 x float>) -declare <3 x float> @llvm.exp10.v3f32(<3 x float>) -declare <4 x float> @llvm.exp10.v4f32(<4 x float>) -declare <1 x double> @llvm.exp10.v1f64(<1 x double>) -declare <2 x double> @llvm.exp10.v2f64(<2 x double>) -declare <3 x double> @llvm.exp10.v3f64(<3 x double>) -declare <4 x double> @llvm.exp10.v4f64(<4 x double>) - define <1 x half> @exp10_v1f16(<1 x half> %x) { ; RV32IFD-LABEL: exp10_v1f16: ; RV32IFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll index 4a77b4d32cdda..4ba3785b85a5e 100644 --- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll +++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll @@ -1933,19 +1933,3 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind { ret i32 %result.0 } -declare { float, i32 } @llvm.frexp.f32.i32(float) #0 -declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) #0 -declare { <4 x float>, <4 x i32> } 
@llvm.frexp.v4f32.v4i32(<4 x float>) #0 - -declare { half, i32 } @llvm.frexp.f16.i32(half) #0 -declare { <2 x half>, <2 x i32> } @llvm.frexp.v2f16.v2i32(<2 x half>) #0 - -declare { double, i32 } @llvm.frexp.f64.i32(double) #0 -declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) #0 - -declare { half, i16 } @llvm.frexp.f16.i16(half) #0 -declare { <2 x half>, <2 x i16> } @llvm.frexp.v2f16.v2i16(<2 x half>) #0 - -declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) #0 - -attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll index 69eca6dd7768a..326cf7bc179ce 100644 --- a/llvm/test/CodeGen/RISCV/machine-combiner.ll +++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll @@ -1070,29 +1070,6 @@ define double @test_fmax_f64(double %a0, double %a1, double %a2, double %a3) { ret double %t2 } -declare i8 @llvm.umin.i8(i8 %a, i8 %b) -declare i16 @llvm.umin.i16(i16 %a, i16 %b) -declare i32 @llvm.umin.i32(i32 %a, i32 %b) -declare i64 @llvm.umin.i64(i64 %a, i64 %b) -declare i8 @llvm.smin.i8(i8 %a, i8 %b) -declare i16 @llvm.smin.i16(i16 %a, i16 %b) -declare i32 @llvm.smin.i32(i32 %a, i32 %b) -declare i64 @llvm.smin.i64(i64 %a, i64 %b) -declare i8 @llvm.umax.i8(i8 %a, i8 %b) -declare i16 @llvm.umax.i16(i16 %a, i16 %b) -declare i32 @llvm.umax.i32(i32 %a, i32 %b) -declare i64 @llvm.umax.i64(i64 %a, i64 %b) -declare i8 @llvm.smax.i8(i8 %a, i8 %b) -declare i16 @llvm.smax.i16(i16 %a, i16 %b) -declare i32 @llvm.smax.i32(i32 %a, i32 %b) -declare i64 @llvm.smax.i64(i64 %a, i64 %b) -declare half @llvm.minnum.f16(half, half) -declare float @llvm.minnum.f32(float, float) -declare double @llvm.minnum.f64(double, double) -declare half @llvm.maxnum.f16(half, half) -declare float @llvm.maxnum.f32(float, float) -declare double @llvm.maxnum.f64(double, double) - define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a3, i64 
%flag) { ; CHECK-LABEL: test_fmadd_strategy: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/machine-cse.ll b/llvm/test/CodeGen/RISCV/machine-cse.ll index 58cc042f90e40..aaf4320284be1 100644 --- a/llvm/test/CodeGen/RISCV/machine-cse.ll +++ b/llvm/test/CodeGen/RISCV/machine-cse.ll @@ -79,8 +79,6 @@ falseblock: ret void } -declare half @llvm.fma.f16(half, half, half) - define void @commute_fmadd_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f16: ; RV32: # %bb.0: @@ -114,8 +112,6 @@ falseblock: ret void } -declare float @llvm.fma.f32(float, float, float) - define void @commute_fmadd_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f32: ; RV32: # %bb.0: @@ -149,8 +145,6 @@ falseblock: ret void } -declare double @llvm.fma.f64(double, double, double) - define void @commute_fmadd_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll b/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll index 83e9bf661ab1c..11047d1c758ea 100644 --- a/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll +++ b/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -O3 < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32>) - define i32 @test(ptr %a, i64 %n) { ; CHECK-LABEL: test: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll index 833e07351eec7..d150ab1dddc6b 100644 --- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll +++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll @@ -933,7 +933,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void
@memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memcpy16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -1020,6 +1019,3 @@ entry: ret i32 0 } - -declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll index 447fc26b0106e..680ddba73f07f 100644 --- a/llvm/test/CodeGen/RISCV/memcpy.ll +++ b/llvm/test/CodeGen/RISCV/memcpy.ll @@ -674,7 +674,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void @memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memcpy16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -761,5 +760,3 @@ entry: ret i32 0 } -declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memmove.ll b/llvm/test/CodeGen/RISCV/memmove.ll index 62915bd4ad99d..1fffe359389b0 100644 --- a/llvm/test/CodeGen/RISCV/memmove.ll +++ b/llvm/test/CodeGen/RISCV/memmove.ll @@ -600,7 +600,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void @memmove16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memmove16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -667,4 +666,3 @@ entry: ret i32 0 } -declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll index 40915241543ee..a03961b0dd5c9 100644 --- a/llvm/test/CodeGen/RISCV/memset-inline.ll +++ b/llvm/test/CodeGen/RISCV/memset-inline.ll @@ -9,9 +9,6 @@ ; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST %struct.x 
= type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind -declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind - ; ///////////////////////////////////////////////////////////////////////////// define void @memset_1(ptr %a, i8 %value) nounwind { @@ -1164,7 +1161,6 @@ define void @aligned_memset_zero_8(ptr %a) nounwind { ret void } - define void @aligned_memset_zero_16(ptr %a) nounwind { ; RV32-BOTH-LABEL: aligned_memset_zero_16: ; RV32-BOTH: # %bb.0: @@ -1243,7 +1239,6 @@ define void @aligned_memset_zero_64(ptr %a) nounwind { ret void } - ; ///////////////////////////////////////////////////////////////////////////// ; Usual overlap tricks diff --git a/llvm/test/CodeGen/RISCV/min-max.ll b/llvm/test/CodeGen/RISCV/min-max.ll index e7f6899f18d16..71859431de923 100644 --- a/llvm/test/CodeGen/RISCV/min-max.ll +++ b/llvm/test/CodeGen/RISCV/min-max.ll @@ -14,8 +14,6 @@ ; Basic tests. -declare i8 @llvm.smax.i8(i8 %a, i8 %b) readnone - define signext i8 @smax_i8(i8 signext %a, i8 signext %b) { ; NOZBB-LABEL: smax_i8: ; NOZBB: # %bb.0: @@ -54,8 +52,6 @@ define signext i8 @smax_i8(i8 signext %a, i8 signext %b) { ret i8 %c } -declare i16 @llvm.smax.i16(i16 %a, i16 %b) readnone - define signext i16 @smax_i16(i16 signext %a, i16 signext %b) { ; NOZBB-LABEL: smax_i16: ; NOZBB: # %bb.0: @@ -94,8 +90,6 @@ define signext i16 @smax_i16(i16 signext %a, i16 signext %b) { ret i16 %c } -declare i32 @llvm.smax.i32(i32 %a, i32 %b) readnone - define signext i32 @smax_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: smax_i32: ; NOZBB: # %bb.0: @@ -134,8 +128,6 @@ define signext i32 @smax_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.smax.i64(i64 %a, i64 %b) readnone - define i64 @smax_i64(i64 %a, i64 %b) { ; RV32I-LABEL: smax_i64: ; RV32I: # %bb.0: @@ -220,8 +212,6 @@ define i64 @smax_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.smin.i8(i8 %a, i8 %b) readnone - 
define signext i8 @smin_i8(i8 signext %a, i8 signext %b) { ; NOZBB-LABEL: smin_i8: ; NOZBB: # %bb.0: @@ -260,8 +250,6 @@ define signext i8 @smin_i8(i8 signext %a, i8 signext %b) { ret i8 %c } -declare i16 @llvm.smin.i16(i16 %a, i16 %b) readnone - define signext i16 @smin_i16(i16 signext %a, i16 signext %b) { ; NOZBB-LABEL: smin_i16: ; NOZBB: # %bb.0: @@ -300,8 +288,6 @@ define signext i16 @smin_i16(i16 signext %a, i16 signext %b) { ret i16 %c } -declare i32 @llvm.smin.i32(i32 %a, i32 %b) readnone - define signext i32 @smin_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: smin_i32: ; NOZBB: # %bb.0: @@ -340,8 +326,6 @@ define signext i32 @smin_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.smin.i64(i64 %a, i64 %b) readnone - define i64 @smin_i64(i64 %a, i64 %b) { ; RV32I-LABEL: smin_i64: ; RV32I: # %bb.0: @@ -426,8 +410,6 @@ define i64 @smin_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.umax.i8(i8 %a, i8 %b) readnone - define i8 @umax_i8(i8 zeroext %a, i8 zeroext %b) { ; NOZBB-LABEL: umax_i8: ; NOZBB: # %bb.0: @@ -466,8 +448,6 @@ define i8 @umax_i8(i8 zeroext %a, i8 zeroext %b) { ret i8 %c } -declare i16 @llvm.umax.i16(i16 %a, i16 %b) readnone - define i16 @umax_i16(i16 zeroext %a, i16 zeroext %b) { ; NOZBB-LABEL: umax_i16: ; NOZBB: # %bb.0: @@ -506,8 +486,6 @@ define i16 @umax_i16(i16 zeroext %a, i16 zeroext %b) { ret i16 %c } -declare i32 @llvm.umax.i32(i32 %a, i32 %b) readnone - define signext i32 @umax_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: umax_i32: ; NOZBB: # %bb.0: @@ -546,8 +524,6 @@ define signext i32 @umax_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.umax.i64(i64 %a, i64 %b) readnone - define i64 @umax_i64(i64 %a, i64 %b) { ; RV32I-LABEL: umax_i64: ; RV32I: # %bb.0: @@ -632,8 +608,6 @@ define i64 @umax_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.umin.i8(i8 %a, i8 %b) readnone - define zeroext i8 @umin_i8(i8 zeroext %a, i8 zeroext %b) { ; NOZBB-LABEL: umin_i8: ; NOZBB: # 
%bb.0: @@ -672,8 +646,6 @@ define zeroext i8 @umin_i8(i8 zeroext %a, i8 zeroext %b) { ret i8 %c } -declare i16 @llvm.umin.i16(i16 %a, i16 %b) readnone - define zeroext i16 @umin_i16(i16 zeroext %a, i16 zeroext %b) { ; NOZBB-LABEL: umin_i16: ; NOZBB: # %bb.0: @@ -712,8 +684,6 @@ define zeroext i16 @umin_i16(i16 zeroext %a, i16 zeroext %b) { ret i16 %c } -declare i32 @llvm.umin.i32(i32 %a, i32 %b) readnone - define signext i32 @umin_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: umin_i32: ; NOZBB: # %bb.0: @@ -752,8 +722,6 @@ define signext i32 @umin_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.umin.i64(i64 %a, i64 %b) readnone - define i64 @umin_i64(i64 %a, i64 %b) { ; RV32I-LABEL: umin_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll index 395fc99ea0536..3803ac82458bd 100644 --- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll +++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m < %s \ ; RUN: | FileCheck %s - @_ZTIi = external dso_local constant ptr declare void @_Z3fooiiiiiiiiiiPi(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3, i32 signext %4, i32 signext %5, i32 signext %6, i32 signext %7, i32 %8, i32 %9, i32 %10) @@ -84,8 +83,6 @@ ehcleanup: declare i32 @__gxx_personality_v0(...) 
-declare i32 @llvm.eh.typeid.for(ptr) - declare ptr @__cxa_begin_catch(ptr) declare void @__cxa_end_catch() diff --git a/llvm/test/CodeGen/RISCV/module-target-abi3.ll b/llvm/test/CodeGen/RISCV/module-target-abi3.ll index 5df750c0d4b6e..1d3fc8f9c9a90 100644 --- a/llvm/test/CodeGen/RISCV/module-target-abi3.ll +++ b/llvm/test/CodeGen/RISCV/module-target-abi3.ll @@ -2,6 +2,5 @@ ; CHECK: Flags: 0x2, single-float ABI -attributes #0 = { "target-features"="+f" } !llvm.module.flags = !{!0} !0 = !{i32 1, !"target-abi", !"ilp32f"} diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll index f9ccf7637eee9..cddd1a0331cad 100644 --- a/llvm/test/CodeGen/RISCV/neg-abs.ll +++ b/llvm/test/CodeGen/RISCV/neg-abs.ll @@ -8,9 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i32 @neg_abs32(i32 %x) { ; RV32I-LABEL: neg_abs32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll index eca3df4861b90..5a4675fa52598 100644 --- a/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll +++ b/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll @@ -17,4 +17,3 @@ entry: ret i1 %7 } -declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/pei-crash.ll b/llvm/test/CodeGen/RISCV/pei-crash.ll index 7778f9580cf69..169033b8036ac 100644 --- a/llvm/test/CodeGen/RISCV/pei-crash.ll +++ b/llvm/test/CodeGen/RISCV/pei-crash.ll @@ -23,6 +23,4 @@ entry: ret i64 %0 } -declare i64 @llvm.readcyclecounter() #1 - attributes #0 = { noinline nounwind optnone } diff --git a/llvm/test/CodeGen/RISCV/pr135206.ll b/llvm/test/CodeGen/RISCV/pr135206.ll index 75b11c373895b..1ca372d528ecf 100644 --- a/llvm/test/CodeGen/RISCV/pr135206.ll +++ b/llvm/test/CodeGen/RISCV/pr135206.ll @@ -3,7 
+3,6 @@ %"buff" = type { [4096 x i64] } -declare void @llvm.memset.p0.i64(ptr, i8, i64, i1) declare void @bar() define i1 @foo() nounwind "probe-stack"="inline-asm" "target-features"="+v" { diff --git a/llvm/test/CodeGen/RISCV/pr56457.ll b/llvm/test/CodeGen/RISCV/pr56457.ll index 0dca858089167..5e46e56103761 100644 --- a/llvm/test/CodeGen/RISCV/pr56457.ll +++ b/llvm/test/CodeGen/RISCV/pr56457.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s -declare i15 @llvm.ctlz.i15(i15, i1) - define i15 @foo(i15 %x) nounwind { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll index 33b89a405d8e3..fa447c8f4fec1 100644 --- a/llvm/test/CodeGen/RISCV/pr69586.ll +++ b/llvm/test/CodeGen/RISCV/pr69586.ll @@ -2413,8 +2413,3 @@ define void @test(ptr %0, ptr %1, i64 %2) { ret void } -declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64) -declare @llvm.riscv.vle.nxv4i32.i64(, ptr, i64) -declare void @llvm.riscv.sf.vc.vv.se.i64.nxv4i32.nxv4i32.i64(i64, i64, , , i64) -declare @llvm.riscv.sf.vc.v.i.se.nxv4i32.i64.i64.i64(i64, i64, i64, i64) -declare void @llvm.riscv.vse.nxv4i32.i64(, ptr, i64) diff --git a/llvm/test/CodeGen/RISCV/pr92193.ll b/llvm/test/CodeGen/RISCV/pr92193.ll index 8c8398c4b45fa..8f197242db14a 100644 --- a/llvm/test/CodeGen/RISCV/pr92193.ll +++ b/llvm/test/CodeGen/RISCV/pr92193.ll @@ -18,4 +18,3 @@ entry: ret i16 %mul.0 } -declare i16 @llvm.vector.reduce.mul.v4i32(<4 x i16>) diff --git a/llvm/test/CodeGen/RISCV/prefetch.ll b/llvm/test/CodeGen/RISCV/prefetch.ll index bc46c60c053f3..ba33ed7ac1a59 100644 --- a/llvm/test/CodeGen/RISCV/prefetch.ll +++ b/llvm/test/CodeGen/RISCV/prefetch.ll @@ -10,8 +10,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zicbop,+zihintntl -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64ZICBOPZIHINTNTL %s -declare void @llvm.prefetch(ptr, i32, i32, i32) - define void 
@test_prefetch_read_locality_0(ptr %a) nounwind { ; RV32I-LABEL: test_prefetch_read_locality_0: ; RV32I: # %bb.0: @@ -264,7 +262,6 @@ define void @test_prefetch_instruction_locality_2(ptr %a) nounwind { ret void } - define void @test_prefetch_read_locality_3(ptr %a) nounwind { ; RV32I-LABEL: test_prefetch_read_locality_3: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll index 5e949f8969e3e..0d289c606eff4 100644 --- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll @@ -1132,9 +1132,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) { ; RV32IZCMP-LABEL: varargs: ; RV32IZCMP: # %bb.0: @@ -1579,8 +1576,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) { @@ -4004,7 +3999,6 @@ define void @callee_no_irq() { } declare void @bar(ptr, ptr) -declare ptr @llvm.frameaddress.p0(i32 immarg) define i32 @use_fp(i32 %x) { ; RV32IZCMP-LABEL: use_fp: diff --git a/llvm/test/CodeGen/RISCV/readcyclecounter.ll b/llvm/test/CodeGen/RISCV/readcyclecounter.ll index c22417cd0390f..83509901b59a6 100644 --- a/llvm/test/CodeGen/RISCV/readcyclecounter.ll +++ b/llvm/test/CodeGen/RISCV/readcyclecounter.ll @@ -6,8 +6,6 @@ ; Verify that we lower @llvm.readcyclecounter() correctly. 
-declare i64 @llvm.readcyclecounter() - define i64 @test_builtin_readcyclecounter() nounwind { ; RV32I-LABEL: test_builtin_readcyclecounter: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/readsteadycounter.ll b/llvm/test/CodeGen/RISCV/readsteadycounter.ll index 19eab64530c66..464d03814ba5d 100644 --- a/llvm/test/CodeGen/RISCV/readsteadycounter.ll +++ b/llvm/test/CodeGen/RISCV/readsteadycounter.ll @@ -6,8 +6,6 @@ ; Verify that we lower @llvm.readsteadycounter() correctly. -declare i64 @llvm.readsteadycounter() - define i64 @test_builtin_readsteadycounter() nounwind { ; RV32I-LABEL: test_builtin_readsteadycounter: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll b/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll index 15b5698c22e81..705009c6deb70 100644 --- a/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll +++ b/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll @@ -1,7 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s - define signext i32 @sum(ptr %a, i32 signext %n, i1 %prof.min.iters.check, %0, %1) { ; CHECK-LABEL: sum: ; CHECK: # %bb.0: # %entry @@ -47,4 +46,3 @@ for.end: ; preds = %for.body, %vector.p ret i32 %red.0.lcssa } -declare i32 @llvm.vp.reduce.add.nxv8i32(i32, , , i32) diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll index bfbe70685cbec..ae797de91b857 100644 --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -128,11 +128,3 @@ entry: } declare void @func() -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", , 2), ptr nocapture, , i64, i64) -declare 
@llvm.riscv.tuple.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), i32) -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(, , , , i64, i64, i64 immarg) -declare @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(, , , , i64, i64 immarg) -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(, , , i64, i64) -declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(, , , , i64, i64 immarg) -declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(, , , , i64, i64, i64 immarg) -declare void @llvm.riscv.vse.nxv16f32.i64(, ptr nocapture, i64) #3 diff --git a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll index aa63552eb4b63..649d91c2d747b 100644 --- a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll +++ b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll @@ -444,48 +444,6 @@ define @frem_f32( %in) { ret %1 } -declare @llvm.ceil.nxv2f64() -declare @llvm.ceil.nxv4f32() -declare @llvm.copysign.nxv2f64(, ) -declare @llvm.copysign.nxv4f32(, ) -declare @llvm.cos.nxv2f64() -declare @llvm.cos.nxv4f32() -declare @llvm.exp.nxv2f64() -declare @llvm.exp.nxv4f32() -declare @llvm.exp2.nxv2f64() -declare @llvm.exp2.nxv4f32() -declare @llvm.exp10.nxv2f64() -declare @llvm.exp10.nxv4f32() -declare @llvm.fabs.nxv2f64() -declare @llvm.fabs.nxv4f32() -declare @llvm.floor.nxv2f64() -declare @llvm.floor.nxv4f32() -declare @llvm.fma.nxv2f64(, , ) -declare @llvm.fma.nxv4f32(, , ) -declare @llvm.log.nxv2f64() -declare @llvm.log.nxv4f32() -declare @llvm.log10.nxv2f64() -declare @llvm.log10.nxv4f32() -declare @llvm.log2.nxv2f64() -declare @llvm.log2.nxv4f32() -declare @llvm.maxnum.nxv2f64(, ) -declare @llvm.maxnum.nxv4f32(, ) -declare @llvm.minnum.nxv2f64(, ) -declare @llvm.minnum.nxv4f32(, ) -declare @llvm.nearbyint.nxv2f64() -declare @llvm.nearbyint.nxv4f32() -declare @llvm.pow.nxv2f64(, ) -declare @llvm.pow.nxv4f32(, ) -declare @llvm.rint.nxv2f64() -declare 
@llvm.rint.nxv4f32() -declare @llvm.round.nxv2f64() -declare @llvm.round.nxv4f32() -declare @llvm.sin.nxv2f64() -declare @llvm.sin.nxv4f32() -declare @llvm.sqrt.nxv2f64() -declare @llvm.sqrt.nxv4f32() -declare @llvm.trunc.nxv2f64() -declare @llvm.trunc.nxv4f32() ;. ; CHECK: attributes #[[ATTR0]] = { "target-features"="+v" } ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) "target-features"="+v" } diff --git a/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll b/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll index 6c6f5e20a8b48..46e06875efe7a 100644 --- a/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll +++ b/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zihintpause -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RVPAUSE -declare void @llvm.riscv.pause() - define void @test_pause() { ; RVPAUSE-LABEL: test_pause: ; RVPAUSE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll index cf64650c964e8..ec1de5d3229fa 100644 --- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll @@ -1436,7 +1436,6 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshl.i32(i32, i32, i32) define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotl_64_mask_shared: @@ -1602,7 +1601,6 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshl.i64(i64, i64, i64) define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotr_32_mask_shared: @@ -1664,7 +1662,6 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 
@llvm.fshr.i32(i32, i32, i32) define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotr_64_mask_shared: @@ -1828,7 +1825,6 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshr.i64(i64, i64, i64) define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotl_32_mask_multiple: diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll index 4eee880a398a9..8a81846da9da2 100644 --- a/llvm/test/CodeGen/RISCV/rv32p.ll +++ b/llvm/test/CodeGen/RISCV/rv32p.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; CHECK-LABEL: ctlz_i32: ; CHECK: # %bb.0: @@ -13,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; CHECK-LABEL: ctlz_i64: ; CHECK: # %bb.0: @@ -32,8 +28,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; CHECK-LABEL: cttz_i32: ; CHECK: # %bb.0: @@ -53,8 +47,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; CHECK-LABEL: cttz_i64: ; CHECK: # %bb.0: @@ -273,8 +265,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; CHECK-LABEL: abs_i32: ; CHECK: # %bb.0: @@ -284,8 +274,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -322,8 +310,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) 
- define i32 @bswap_i32(i32 %a) nounwind { ; CHECK-LABEL: bswap_i32: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; CHECK-LABEL: bswap_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll index aa02d46c34550..ff236c72922f4 100644 --- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+xtheadbb,+b -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32XTHEADBB,RV32XTHEADBB-B -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -63,8 +61,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -171,8 +167,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -220,8 +214,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -578,8 +570,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -609,8 +599,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll index 1a6c87465d026..b97277aeee708 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll +++ 
b/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBB -declare i32 @llvm.riscv.orc.b.i32(i32) - define i32 @orcb(i32 %a) nounwind { ; RV32ZBB-LABEL: orcb: ; RV32ZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll index 7ab3d7c694568..d7f55f23a3c56 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll @@ -245,8 +245,6 @@ define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind { ret i64 %not } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: rol_i32: ; RV32I: # %bb.0: @@ -267,8 +265,6 @@ define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: rol_i64: ; CHECK: # %bb.0: @@ -297,8 +293,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: ror_i32: ; RV32I: # %bb.0: @@ -319,8 +313,6 @@ define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. 
-declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: ror_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll index a1a843a7c1ba7..dad71ee5de066 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -56,8 +54,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -151,8 +147,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -185,8 +179,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -258,8 +250,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define i32 @ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: ctpop_i32: ; RV32I: # %bb.0: @@ -370,8 +360,6 @@ define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind { ret i1 %2 } -declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) - define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind { ; RV32I-LABEL: ctpop_v2i32: ; RV32I: # %bb.0: @@ -484,8 +472,6 @@ define <2 x i1> @ctpop_v2i32_ne_one(<2 x i32> %a) nounwind { ret <2 x i1> %2 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV32I-LABEL: ctpop_i64: ; RV32I: # %bb.0: @@ -649,8 +635,6 @@ define i1 @ctpop_i64_ne_one(i64 %a) nounwind { ret i1 %2 } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) - define <2 x i64> @ctpop_v2i64(<2 
x i64> %a) nounwind { ; RV32I-LABEL: ctpop_v2i64: ; RV32I: # %bb.0: @@ -1127,8 +1111,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV32I-LABEL: abs_i32: ; RV32I: # %bb.0: @@ -1146,8 +1128,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -1195,8 +1175,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -1221,8 +1199,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll index fcd1671bc04d0..0f53acdfc7bf3 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBC -declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) - define i32 @clmul32r(i32 %a, i32 %b) nounwind { ; RV32ZBC-LABEL: clmul32r: ; RV32ZBC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll index 51fd086e26dfe..b40e8c28db7ee 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBC-ZBKC -declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b) - define i32 @clmul32(i32 %a, i32 %b) nounwind { ; RV32ZBC-ZBKC-LABEL: clmul32: ; RV32ZBC-ZBKC: # %bb.0: @@ -15,8 +13,6 @@ define i32 @clmul32(i32 %a, i32 %b) nounwind { ret 
i32 %tmp } -declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) - define i32 @clmul32h(i32 %a, i32 %b) nounwind { ; RV32ZBC-ZBKC-LABEL: clmul32h: ; RV32ZBC-ZBKC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll index a4d76f8e82103..0b92498f221d9 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBKB -declare i32 @llvm.riscv.brev8(i32); - define i32 @brev8(i32 %a) nounwind { ; RV32ZBKB-LABEL: brev8: ; RV32ZBKB: # %bb.0: @@ -25,8 +23,6 @@ define zeroext i16 @brev8_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i32 @llvm.bswap.i32(i32) - define i32 @rev8_i32(i32 %a) nounwind { ; RV32ZBKB-LABEL: rev8_i32: ; RV32ZBKB: # %bb.0: @@ -36,8 +32,6 @@ define i32 @rev8_i32(i32 %a) nounwind { ret i32 %1 } -declare i32 @llvm.riscv.zip(i32); - define i32 @zip(i32 %a) nounwind { ; RV32ZBKB-LABEL: zip: ; RV32ZBKB: # %bb.0: @@ -47,8 +41,6 @@ define i32 @zip(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.unzip(i32); - define i32 @unzip(i32 %a) nounwind { ; RV32ZBKB-LABEL: unzip: ; RV32ZBKB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll index eb94c20e1f44e..11c00641bb66d 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll @@ -1,8 +1,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkx -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBKX -declare i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b) - define i32 @xperm8(i32 %a, i32 %b) nounwind { ; RV32ZBKX-LABEL: xperm8: ; RV32ZBKX: # %bb.0: @@ -12,8 +10,6 @@ define i32 @xperm8(i32 %a, i32 %b) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b) - define i32 @xperm4(i32 %a, i32 %b) nounwind { ; 
RV32ZBKX-LABEL: xperm4: ; RV32ZBKX: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll index 8e843fa47db69..f56ee0011e413 100644 --- a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zimop -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZIMOP -declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b) - define i32 @mopr0_32(i32 %a) nounwind { ; RV32ZIMOP-LABEL: mopr0_32: ; RV32ZIMOP: # %bb.0: @@ -22,8 +20,6 @@ define i32 @mopr31_32(i32 %a) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c) - define i32 @moprr0_32(i32 %a, i32 %b) nounwind { ; RV32ZIMOP-LABEL: moprr0_32: ; RV32ZIMOP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll index 33c5839fde586..6e6d18490782c 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKND -declare i32 @llvm.riscv.aes32dsi(i32, i32, i8); - define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsi: ; RV32ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8); - define i32 @aes32dsmi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsmi: ; RV32ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll index 9535d127f244b..2c69ddd684f80 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s 
-check-prefix=RV32ZKND -declare i32 @llvm.riscv.aes32dsi(i32, i32, i32); - define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsi: ; RV32ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32dsmi(i32, i32, i32); - define i32 @aes32dsmi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsmi: ; RV32ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll index 296641ca593e6..6cdad6682b70c 100644 --- a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNE -declare i32 @llvm.riscv.aes32esi(i32, i32, i8); - define i32 @aes32esi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esi: ; RV32ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32esi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32esmi(i32, i32, i8); - define i32 @aes32esmi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esmi: ; RV32ZKNE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll index 5859426823c8a..6dd22001f815e 100644 --- a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNE -declare i32 @llvm.riscv.aes32esi(i32, i32, i32); - define i32 @aes32esi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esi: ; RV32ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32esi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32esmi(i32, i32, i32); - define i32 @aes32esmi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esmi: ; RV32ZKNE: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll index 906285e320d12..a3846bcf69180 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNH - -declare i32 @llvm.riscv.sha256sig0(i32); - define i32 @sha256sig0_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sig0_i32: ; RV32ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define i32 @sha256sig0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sig1(i32); - define i32 @sha256sig1_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sig1_i32: ; RV32ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define i32 @sha256sig1_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum0(i32); - define i32 @sha256sum0_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sum0_i32: ; RV32ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define i32 @sha256sum0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum1(i32); - define i32 @sha256sum1_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sum1_i32: ; RV32ZKNH: # %bb.0: @@ -47,8 +38,6 @@ define i32 @sha256sum1_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig0l(i32, i32); - define i32 @sha512sig0l(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig0l: ; RV32ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i32 @sha512sig0l(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig0h(i32, i32); - define i32 @sha512sig0h(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig0h: ; RV32ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i32 @sha512sig0h(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig1l(i32, i32); - define i32 @sha512sig1l(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig1l: ; RV32ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i32 @sha512sig1l(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 
@llvm.riscv.sha512sig1h(i32, i32); - define i32 @sha512sig1h(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig1h: ; RV32ZKNH: # %bb.0: @@ -91,8 +74,6 @@ define i32 @sha512sig1h(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sum0r(i32, i32); - define i32 @sha512sum0r(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sum0r: ; RV32ZKNH: # %bb.0: @@ -102,8 +83,6 @@ define i32 @sha512sum0r(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sum1r(i32, i32); - define i32 @sha512sum1r(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sum1r: ; RV32ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll index e8ecb4f3decd2..b435cb6a4cec0 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSED -declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8); - define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ks_i32: ; RV32ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8); - define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ed_i32: ; RV32ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll index e29c515cb8319..dde131e778d19 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSED -declare i32 @llvm.riscv.sm4ks(i32, i32, i32); - define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ks_i32: ; RV32ZKSED: # %bb.0: @@ -13,8 +11,6 @@ 
define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed(i32, i32, i32); - define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ed_i32: ; RV32ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll index df2703f996f96..c92e8a3358985 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSH -declare i32 @llvm.riscv.sm3p0(i32); - define i32 @sm3p0_i32(i32 %a) nounwind { ; RV32ZKSH-LABEL: sm3p0_i32: ; RV32ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define i32 @sm3p0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm3p1(i32); - define i32 @sm3p1_i32(i32 %a) nounwind { ; RV32ZKSH-LABEL: sm3p1_i32: ; RV32ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll index a919452389c43..c310c105767ff 100644 --- a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll @@ -195,7 +195,6 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f64(double %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f64(double) define i128 @fptoui_sat_f64_to_i128(double %a) nounwind { ; RV64I-LABEL: fptoui_sat_f64_to_i128: @@ -286,4 +285,3 @@ define i128 @fptoui_sat_f64_to_i128(double %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f64(double %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f64(double) diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll index 0af75a789f7a2..fcbbb8235c629 100644 --- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll @@ -191,7 +191,6 @@ define i128 @fptosi_sat_f32_to_i128(float %a) 
nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f32(float %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f32(float) define i128 @fptoui_sat_f32_to_i128(float %a) nounwind { ; RV64I-LABEL: fptoui_sat_f32_to_i128: @@ -278,4 +277,3 @@ define i128 @fptoui_sat_f32_to_i128(float %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f32(float %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f32(float) diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll index d8f3816b85485..7ff1d7684d1ee 100644 --- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll @@ -269,7 +269,6 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f16(half %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f16(half) define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { ; RV64I-LABEL: fptoui_sat_f16_to_i128: @@ -358,6 +357,5 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f16(half %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f16(half) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll index 0850cc65d81ee..a134fc89452fb 100644 --- a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll +++ b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll @@ -64,6 +64,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) -declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...) -declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...) 
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll index 9437ac02962b6..62e2a6702bb38 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll @@ -19,4 +19,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll index 1bba9cbfd03c4..c2e5e19a8ec6a 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll @@ -19,4 +19,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll index bb2caeac4a976..68a7702a3df68 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll @@ -14,4 +14,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll index 320a3aa94cd7d..92f509037139e 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll @@ -481,6 +481,3 @@ define void @floats(float %f, double %g, half %h, bfloat %i) { ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) -declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...) -declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...) 
diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll index 3ba49653cd01e..83ee899894ae4 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll @@ -11,6 +11,4 @@ entry: ret void } - declare void @return_i1() -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll index 9c99f64bcacc0..16e9d2d68a936 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll @@ -16,8 +16,4 @@ entry: ret i1 %call2 } - declare i1 @return_i1() -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) -declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) -declare i1 @llvm.experimental.gc.result.i1(token) diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll index 1a0be244c824c..53240c5cdb24d 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll @@ -267,17 +267,3 @@ entry: ret void } -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) 
-declare i1 @llvm.experimental.gc.result.i1(token) - -declare i32 @llvm.experimental.gc.result.i32(token) - -declare ptr @llvm.experimental.gc.result.p0(token) - -declare float @llvm.experimental.gc.result.f32(token) - -declare %struct @llvm.experimental.gc.result.struct(token) - - - -declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll index 8a338a855c863..06a818516c149 100644 --- a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll +++ b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll @@ -4,8 +4,6 @@ ; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -mattr=+experimental-zicfilp -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64-LINUX %s -declare void @llvm.init.trampoline(ptr, ptr, ptr) -declare ptr @llvm.adjust.trampoline(ptr) declare i64 @f(ptr nest, i64) define i64 @test0(i64 %n, ptr %p) nounwind { diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll index c68fa59cd5780..2ff26e5274542 100644 --- a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll +++ b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll @@ -4,8 +4,6 @@ ; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64-LINUX %s -declare void @llvm.init.trampoline(ptr, ptr, ptr) -declare ptr @llvm.adjust.trampoline(ptr) declare i64 @f(ptr nest, i64) define i64 @test0(i64 %n, ptr %p) nounwind { diff --git a/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll index 8396d992164e4..cbb22803a7912 100644 --- a/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll @@ -22,7 +22,6 @@ define i32 @aext_fptosi(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } 
-declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) define signext i32 @sext_fptosi(double %a) nounwind strictfp { ; RV64ID-LABEL: sext_fptosi: @@ -69,7 +68,6 @@ define i32 @aext_fptoui(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) define signext i32 @sext_fptoui(double %a) nounwind strictfp { ; RV64ID-LABEL: sext_fptoui: @@ -112,7 +110,6 @@ define double @uitofp_aext_i32_to_f64(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata, metadata) define double @uitofp_sext_i32_to_f64(i32 signext %a) nounwind strictfp { ; RV64ID-LABEL: uitofp_sext_i32_to_f64: @@ -155,7 +152,6 @@ define double @sitofp_aext_i32_to_f64(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata, metadata) define double @sitofp_sext_i32_to_f64(i32 signext %a) nounwind strictfp { ; RV64ID-LABEL: sitofp_sext_i32_to_f64: diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll index 2b358ce075d60..af61656671292 100644 --- a/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll @@ -22,7 +22,6 @@ define i32 @aext_fptosi(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) define signext i32 
@sext_fptosi(float %a) nounwind strictfp { ; RV64IF-LABEL: sext_fptosi: @@ -69,7 +68,6 @@ define i32 @aext_fptoui(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) define signext i32 @sext_fptoui(float %a) nounwind strictfp { ; RV64IF-LABEL: sext_fptoui: @@ -112,7 +110,6 @@ define float @uitofp_aext_i32_to_f32(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata) define float @uitofp_sext_i32_to_f32(i32 signext %a) nounwind strictfp { ; RV64IF-LABEL: uitofp_sext_i32_to_f32: @@ -155,7 +152,6 @@ define float @sitofp_aext_i32_to_f32(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) define float @sitofp_sext_i32_to_f32(i32 signext %a) nounwind strictfp { ; RV64IF-LABEL: sitofp_sext_i32_to_f32: diff --git a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll index 6fdf2a3a939ce..5b9617cba08b4 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll @@ -7,9 +7,6 @@ ; The test cases check that we use the si versions of the conversions from ; double. 
-declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) - define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp { ; RV64I-LABEL: strict_fp64_to_ui32: ; RV64I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll index b645b621c75c6..c5ecb68b8a270 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll @@ -5,9 +5,6 @@ ; The test cases check that we use the si versions of the conversions from ; double. -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) - define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp { ; RV64I-LABEL: strict_fp32_to_ui32: ; RV64I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll index f937f44f13320..ec242294c3036 100644 --- a/llvm/test/CodeGen/RISCV/rv64p.ll +++ b/llvm/test/CodeGen/RISCV/rv64p.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; CHECK-LABEL: ctlz_i32: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; CHECK-LABEL: ctlz_i64: ; CHECK: # %bb.0: @@ -77,8 +73,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; CHECK-LABEL: cttz_i32: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; CHECK-LABEL: cttz_i64: ; CHECK: # 
%bb.0: @@ -292,8 +284,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; CHECK-LABEL: abs_i32: ; CHECK: # %bb.0: @@ -312,8 +302,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -323,8 +311,6 @@ define i64 @abs_i64(i64 %x) { ret i64 %abs } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; CHECK-LABEL: bswap_i32: ; CHECK: # %bb.0: @@ -348,8 +334,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; CHECK-LABEL: bswap_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll index c5707987408f7..c62fb0ae63555 100644 --- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb,+b -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV64XTHEADBB,RV64XTHEADBB-B -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: ; RV64I: # %bb.0: @@ -321,8 +319,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -388,8 +384,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -625,8 +619,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -1020,8 +1012,6 @@ 
define i64 @zext_i64_srliw(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1074,8 +1064,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll index 500d51be80a66..52415a31effad 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBB -declare i32 @llvm.riscv.orc.b.i32(i32) - define signext i32 @orcb32(i32 signext %a) nounwind { ; RV64ZBB-LABEL: orcb32: ; RV64ZBB: # %bb.0: @@ -43,8 +41,6 @@ define signext i32 @orcb32_knownbits(i32 signext %a) nounwind { ret i32 %tmp5 } -declare i64 @llvm.riscv.orc.b.i64(i64) - define i64 @orcb64(i64 %a) nounwind { ; RV64ZBB-LABEL: orcb64: ; RV64ZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll index f2c95f855e178..4460773290b7e 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll @@ -219,8 +219,6 @@ define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind { ret i64 %not } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: @@ -278,8 +276,6 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: rol_i64: ; RV64I: # %bb.0: @@ -297,8 +293,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, 
i32, i32) - define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: @@ -356,8 +350,6 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: ror_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll index d8b7bfcbceb27..b3581459c2622 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: ; RV64I: # %bb.0: @@ -278,8 +276,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -340,8 +336,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -526,8 +520,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -560,8 +552,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define signext i32 @ctpop_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctpop_i32: ; RV64I: # %bb.0: @@ -709,8 +699,6 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind { ret i32 %1 } -declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) - define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind { ; RV64I-LABEL: ctpop_v2i32: ; RV64I: # %bb.0: @@ -831,8 +819,6 @@ define <2 x i1> @ctpop_v2i32_ne_one(<2 x i32> %a) nounwind { 
ret <2 x i1> %2 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV64I-LABEL: ctpop_i64: ; RV64I: # %bb.0: @@ -951,8 +937,6 @@ define i1 @ctpop_i64_ne_one(i64 %a) nounwind { ret i1 %2 } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) - define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind { ; RV64I-LABEL: ctpop_v2i64: ; RV64I: # %bb.0: @@ -1283,8 +1267,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV64I-LABEL: abs_i32: ; RV64I: # %bb.0: @@ -1320,8 +1302,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; RV64I-LABEL: abs_i64: ; RV64I: # %bb.0: @@ -1369,8 +1349,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1425,8 +1403,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll index ef42c15b6b986..9dbbcb43f5e95 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBC -declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b) - define i64 @clmul64r(i64 %a, i64 %b) nounwind { ; RV64ZBC-LABEL: clmul64r: ; RV64ZBC: # %bb.0: @@ -13,8 +11,6 @@ define i64 @clmul64r(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) - define signext i32 @clmul32r(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-LABEL: clmul32r: ; RV64ZBC: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll index aa9e89bc20953..83bcd57c7094f 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBC-ZBKC -declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b) - define i64 @clmul64(i64 %a, i64 %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul64: ; RV64ZBC-ZBKC: # %bb.0: @@ -15,8 +13,6 @@ define i64 @clmul64(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b) - define i64 @clmul64h(i64 %a, i64 %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul64h: ; RV64ZBC-ZBKC: # %bb.0: @@ -26,8 +22,6 @@ define i64 @clmul64h(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b) - define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul32: ; RV64ZBC-ZBKC: # %bb.0: @@ -38,8 +32,6 @@ define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) - define signext i32 @clmul32h(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul32h: ; RV64ZBC-ZBKC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll index 3169f65f64671..cd9fc2adc80a6 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBKB -declare i64 @llvm.riscv.brev8.i64(i64) - define i64 @brev8(i64 %a) nounwind { ; RV64ZBKB-LABEL: brev8: ; RV64ZBKB: # %bb.0: @@ -25,8 +23,6 @@ define zeroext i16 @brev8_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i64 @llvm.bswap.i64(i64) - define i64 
@rev8_i64(i64 %a) { ; RV64ZBKB-LABEL: rev8_i64: ; RV64ZBKB: # %bb.0: @@ -36,8 +32,6 @@ define i64 @rev8_i64(i64 %a) { ret i64 %1 } -declare i32 @llvm.riscv.brev8.i32(i32) - define signext i32 @brev8_i32(i32 signext %a) nounwind { ; RV64ZBKB-LABEL: brev8_i32: ; RV64ZBKB: # %bb.0: @@ -60,8 +54,6 @@ define zeroext i16 @brev8_i32_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @rev8_i32(i32 signext %a) { ; RV64ZBKB-LABEL: rev8_i32: ; RV64ZBKB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll index f4186606c14f9..72afe21aa4e23 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll @@ -1,8 +1,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkx -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBKX -declare i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b) - define i64 @xperm8(i64 %a, i64 %b) nounwind { ; RV64ZBKX-LABEL: xperm8: ; RV64ZBKX: # %bb.0: @@ -12,8 +10,6 @@ define i64 @xperm8(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b) - define i64 @xperm4(i64 %a, i64 %b) nounwind { ; RV64ZBKX-LABEL: xperm4: ; RV64ZBKX: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll index afc41fe86b838..12767f64048b4 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll @@ -24,7 +24,6 @@ define i32 @aext_fptosi(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define signext i32 @sext_fptosi(half %a) nounwind strictfp { ; RV64IZFH-LABEL: sext_fptosi: @@ -71,7 +70,6 @@ define i32 @aext_fptoui(half %a) nounwind strictfp { %1 = call i32 
@llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) define signext i32 @sext_fptoui(half %a) nounwind strictfp { ; RV64IZFH-LABEL: sext_fptoui: @@ -114,7 +112,6 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata) define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFH-LABEL: uitofp_sext_i32_to_f16: @@ -157,7 +154,6 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata) define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFH-LABEL: sitofp_sext_i32_to_f16: diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll index a453e3b0f1c53..633634fab2e13 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll @@ -14,8 +14,6 @@ ; These intrinsics require half and i64 to be legal types. 
-declare i64 @llvm.llrint.i64.f16(half) - define i64 @llrint_f16(half %a) nounwind { ; RV64IZFH-LABEL: llrint_f16: ; RV64IZFH: # %bb.0: @@ -40,8 +38,6 @@ define i64 @llrint_f16(half %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f16(half) - define i64 @llround_f16(half %a) nounwind { ; RV64IZFH-LABEL: llround_f16: ; RV64IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll index 41d66382679f1..91f8af9d92bba 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll @@ -23,7 +23,6 @@ define i32 @aext_fptosi(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define signext i32 @sext_fptosi(half %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sext_fptosi: @@ -76,7 +75,6 @@ define i32 @aext_fptoui(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) define signext i32 @sext_fptoui(half %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sext_fptoui: @@ -125,7 +123,6 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata) define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: uitofp_sext_i32_to_f16: @@ -174,7 +171,6 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half 
%1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata) define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sitofp_sext_i32_to_f16: diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll index 7d8f2b03b6721..266d7b01fe5f1 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll @@ -14,8 +14,6 @@ ; These intrinsics require half and i64 to be legal types. -declare i64 @llvm.llrint.i64.f16(half) - define i64 @llrint_f16(half %a) nounwind { ; CHECKIZFHMIN-LABEL: llrint_f16: ; CHECKIZFHMIN: # %bb.0: @@ -32,8 +30,6 @@ define i64 @llrint_f16(half %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f16(half) - define i64 @llround_f16(half %a) nounwind { ; CHECKIZFHMIN-LABEL: llround_f16: ; CHECKIZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll index a407fe552ff74..fbcfe34a835f3 100644 --- a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zimop -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZIMOP -declare i64 @llvm.riscv.mopr.i64(i64 %a, i64 %b) - define i64 @mopr0_64(i64 %a) nounwind { ; RV64ZIMOP-LABEL: mopr0_64: ; RV64ZIMOP: # %bb.0: @@ -22,8 +20,6 @@ define i64 @mopr31_64(i64 %a) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 %c) - define i64 @moprr0_64(i64 %a, i64 %b) nounwind { ; RV64ZIMOP-LABEL: moprr0_64: ; RV64ZIMOP: # %bb.0: @@ -42,8 +38,6 @@ define i64 @moprr7_64(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b) - define signext i32 @mopr0_32(i32 signext %a) nounwind { ; RV64ZIMOP-LABEL: mopr0_32: ; RV64ZIMOP: # %bb.0: @@ -64,8 +58,6 @@ define signext i32 @mopr31_32(i32 
signext %a) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c) - define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZIMOP-LABEL: moprr0_32: ; RV64ZIMOP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll index ea922ed6775a0..8e9511c9db31b 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKND -declare i64 @llvm.riscv.aes64ds(i64, i64); - define i64 @aes64ds(i64 %a, i64 %b) nounwind { ; RV64ZKND-LABEL: aes64ds: ; RV64ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i64 @aes64ds(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64dsm(i64, i64); - define i64 @aes64dsm(i64 %a, i64 %b) nounwind { ; RV64ZKND-LABEL: aes64dsm: ; RV64ZKND: # %bb.0: @@ -24,8 +20,6 @@ define i64 @aes64dsm(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64im(i64); - define i64 @aes64im(i64 %a) nounwind { ; RV64ZKND-LABEL: aes64im: ; RV64ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll index 075097037a5b3..7035f6e38d72f 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKND-ZKNE -declare i64 @llvm.riscv.aes64ks2(i64, i64); - define i64 @aes64ks2(i64 %a, i64 %b) nounwind { ; RV64ZKND-ZKNE-LABEL: aes64ks2: ; RV64ZKND-ZKNE: # %bb.0: @@ -15,8 +13,6 @@ define i64 @aes64ks2(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64ks1i(i64, i32); - define i64 @aes64ks1i(i64 %a) nounwind { ; RV64ZKND-ZKNE-LABEL: aes64ks1i: ; RV64ZKND-ZKNE: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll index eee03a0c4469b..f1280c2de4af8 100644 --- a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNE -declare i64 @llvm.riscv.aes64es(i64, i64); - define i64 @aes64es(i64 %a, i64 %b) nounwind { ; RV64ZKNE-LABEL: aes64es: ; RV64ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i64 @aes64es(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64esm(i64, i64); - define i64 @aes64esm(i64 %a, i64 %b) nounwind { ; RV64ZKNE-LABEL: aes64esm: ; RV64ZKNE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll index b96524b3294fc..72051e9af7f4d 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNH - -declare i64 @llvm.riscv.sha256sig0.i64(i64); - define i64 @sha256sig0_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sig0_i64: ; RV64ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define i64 @sha256sig0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sig1.i64(i64); - define i64 @sha256sig1_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sig1_i64: ; RV64ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define i64 @sha256sig1_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sum0.i64(i64); - define i64 @sha256sum0_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sum0_i64: ; RV64ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define i64 @sha256sum0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sum1.i64(i64); - define i64 @sha256sum1_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sum1_i64: ; RV64ZKNH: # 
%bb.0: @@ -47,8 +38,6 @@ define i64 @sha256sum1_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig0(i64); - define i64 @sha512sig0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig0: ; RV64ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i64 @sha512sig0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig1(i64); - define i64 @sha512sig1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig1: ; RV64ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i64 @sha512sig1(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum0(i64); - define i64 @sha512sum0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum0: ; RV64ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i64 @sha512sum0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum1(i64); - define i64 @sha512sum1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum1: ; RV64ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll index 866995edbfa47..b563600724392 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNH - -declare i32 @llvm.riscv.sha256sig0(i32); - define signext i32 @sha256sig0_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sig0_i32: ; RV64ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define signext i32 @sha256sig0_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sig1(i32); - define signext i32 @sha256sig1_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sig1_i32: ; RV64ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define signext i32 @sha256sig1_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum0(i32); - define signext i32 @sha256sum0_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sum0_i32: ; RV64ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define signext i32 @sha256sum0_i32(i32 signext %a) nounwind { 
ret i32 %val } -declare i32 @llvm.riscv.sha256sum1(i32); - define signext i32 @sha256sum1_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sum1_i32: ; RV64ZKNH: # %bb.0: @@ -47,8 +38,6 @@ define signext i32 @sha256sum1_i32(i32 signext %a) nounwind { ret i32 %val } -declare i64 @llvm.riscv.sha512sig0(i64); - define i64 @sha512sig0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig0: ; RV64ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i64 @sha512sig0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig1(i64); - define i64 @sha512sig1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig1: ; RV64ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i64 @sha512sig1(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum0(i64); - define i64 @sha512sum0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum0: ; RV64ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i64 @sha512sum0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum1(i64); - define i64 @sha512sum1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum1: ; RV64ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll index 2fa7601906067..ad6ac276f0d5b 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i8); - define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i64: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8); - define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i64: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll 
b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll index c942ff884450b..0defffd2e899d 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i32); - define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i64: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i32); - define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i64: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll index bc7807350fcb2..90864600599ba 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i32 @llvm.riscv.sm4ks(i32, i32, i32); - define signext i32 @sm4ks_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i32: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define signext i32 @sm4ks_i32(i32 signext %a, i32 signext %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed(i32, i32, i32); - define signext i32 @sm4ed_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i32: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll index 8790ec1af24dd..6767bdc9ba6d5 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | 
FileCheck %s -check-prefix=RV64ZKSH -declare i64 @llvm.riscv.sm3p0.i64(i64); - define i64 @sm3p0_i64(i64 %a) nounwind { ; RV64ZKSH-LABEL: sm3p0_i64: ; RV64ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm3p0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm3p1.i64(i64); - define i64 @sm3p1_i64(i64 %a) nounwind { ; RV64ZKSH-LABEL: sm3p1_i64: ; RV64ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll index 3436236d46359..8d3e96fa0ee57 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSH -declare i32 @llvm.riscv.sm3p0(i32); - define signext i32 @sm3p0_i32(i32 signext %a) nounwind { ; RV64ZKSH-LABEL: sm3p0_i32: ; RV64ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define signext i32 @sm3p0_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm3p1(i32); - define signext i32 @sm3p1_i32(i32 signext %a) nounwind { ; RV64ZKSH-LABEL: sm3p1_i32: ; RV64ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll index bb3e691311cd8..d4ea9e6c3def0 100644 --- a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll @@ -523,20 +523,85 @@ define void @test_non_const_splat_i16(ptr %ret_ptr, ptr %a_ptr, i16 %elt) { ret void } -; Intrinsic declarations -declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) -declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>) -declare <2 
x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>) -declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>) +define void @test_build_vector_i8(i8 %a, i8 %c, i8 %b, i8 %d, ptr %ret_ptr) { +; CHECK-RV32-LABEL: test_build_vector_i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: ppack.dh a0, a0, a2 +; CHECK-RV32-NEXT: pack a0, a0, a1 +; CHECK-RV32-NEXT: sw a0, 0(a4) +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: test_build_vector_i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: ppack.h a1, a1, a3 +; CHECK-RV64-NEXT: ppack.h a0, a0, a2 +; CHECK-RV64-NEXT: ppack.w a0, a0, a1 +; CHECK-RV64-NEXT: sw a0, 0(a4) +; CHECK-RV64-NEXT: ret + %v0 = insertelement <4 x i8> poison, i8 %a, i32 0 + %v1 = insertelement <4 x i8> %v0, i8 %b, i32 1 + %v2 = insertelement <4 x i8> %v1, i8 %c, i32 2 + %v3 = insertelement <4 x i8> %v2, i8 %d, i32 3 + store <4 x i8> %v3, ptr %ret_ptr + ret void +} + +define void @test_build_vector_i16(ptr %ret_ptr, i16 %a, i16 %b) { +; CHECK-RV32-LABEL: test_build_vector_i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: pack a1, a1, a2 +; CHECK-RV32-NEXT: sw a1, 0(a0) +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: test_build_vector_i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: ppack.w a1, a1, a2 +; CHECK-RV64-NEXT: sw a1, 0(a0) +; CHECK-RV64-NEXT: ret + %v0 = insertelement <2 x i16> poison, i16 %a, i32 0 + %v1 = insertelement <2 x i16> %v0, i16 %b, i32 1 + store <2 x i16> %v1, ptr %ret_ptr + ret void +} + +; Test logical shift left immediate for v2i16 +define void @test_pslli_h(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_pslli_h: +; CHECK: # %bb.0: +; CHECK-NEXT: lw a1, 0(a1) +; CHECK-NEXT: pslli.h a1, a1, 2 +; CHECK-NEXT: sw a1, 
0(a0) +; CHECK-NEXT: ret + %a = load <2 x i16>, ptr %a_ptr + %res = shl <2 x i16> %a, splat(i16 2) + store <2 x i16> %res, ptr %ret_ptr + ret void +} + +; Test logical shift left immediate for v4i8 +define void @test_pslli_b(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_pslli_b: +; CHECK: # %bb.0: +; CHECK-NEXT: lw a1, 0(a1) +; CHECK-NEXT: pslli.b a1, a1, 2 +; CHECK-NEXT: sw a1, 0(a0) +; CHECK-NEXT: ret + %a = load <4 x i8>, ptr %a_ptr + %res = shl <4 x i8> %a, splat(i8 2) + store <4 x i8> %res, ptr %ret_ptr + ret void +} + +; Test arithmetic saturation shift left immediate for v2i16 +define void @test_psslai_h(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_psslai_h: +; CHECK: # %bb.0: +; CHECK-NEXT: lw a1, 0(a1) +; CHECK-NEXT: psslai.h a1, a1, 2 +; CHECK-NEXT: sw a1, 0(a0) +; CHECK-NEXT: ret + %a = load <2 x i16>, ptr %a_ptr + %res = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %a, <2 x i16> splat(i16 2)) + store <2 x i16> %res, ptr %ret_ptr + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll index f989b025a12dc..b39b807d43154 100644 --- a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll @@ -685,20 +685,111 @@ define void @test_non_const_splat_i32(ptr %ret_ptr, ptr %a_ptr, i32 %elt) { ret void } -; Intrinsic declarations -declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) -declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) -declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>) 
-declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>) -declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>) +define void @test_build_vector_i8(ptr %ret_ptr, i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8 %h) { +; CHECK-LABEL: test_build_vector_i8: +; CHECK: # %bb.0: +; CHECK-NEXT: lbu t0, 0(sp) +; CHECK-NEXT: ppack.h a5, a5, a6 +; CHECK-NEXT: ppack.h a3, a3, a4 +; CHECK-NEXT: ppack.h a1, a1, a2 +; CHECK-NEXT: ppack.h a2, a7, t0 +; CHECK-NEXT: ppack.w a2, a5, a2 +; CHECK-NEXT: ppack.w a1, a1, a3 +; CHECK-NEXT: pack a1, a1, a2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %v0 = insertelement <8 x i8> poison, i8 %a, i32 0 + %v1 = insertelement <8 x i8> %v0, i8 %b, i32 1 + %v2 = insertelement <8 x i8> %v1, i8 %c, i32 2 + %v3 = insertelement <8 x i8> %v2, i8 %d, i32 3 + %v4 = insertelement <8 x i8> %v3, i8 %e, i32 4 + %v5 = insertelement <8 x i8> %v4, i8 %f, i32 5 + %v6 = insertelement <8 x i8> %v5, i8 %g, i32 6 + %v7 = insertelement <8 x i8> %v6, i8 %h, i32 7 + store <8 x i8> %v7, ptr %ret_ptr + ret void +} + +define void @test_build_vector_i16(ptr %ret_ptr, i16 %a, i16 %b, i16 %c, i16 %d) { +; CHECK-LABEL: test_build_vector_i16: +; CHECK: # %bb.0: +; CHECK-NEXT: ppack.w a3, a3, a4 +; CHECK-NEXT: ppack.w a1, a1, a2 +; CHECK-NEXT: pack a1, a1, a3 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %v0 = insertelement <4 x i16> poison, i16 %a, i32 0 + %v1 = insertelement <4 x i16> %v0, i16 %b, i32 1 + %v2 = insertelement <4 x i16> %v1, i16 %c, i32 2 + %v3 = insertelement <4 x i16> %v2, i16 %d, i32 3 + store <4 x i16> %v3, ptr %ret_ptr + ret void +} + +define void @test_build_vector_i32(ptr %ret_ptr, i32 %a, i32 %b) { +; CHECK-LABEL: test_build_vector_i32: +; CHECK: # %bb.0: +; CHECK-NEXT: pack a1, a1, a2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %v0 = insertelement <2 x i32> poison, i32 %a, i32 0 
+ %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1 + store <2 x i32> %v1, ptr %ret_ptr + ret void +} + +; Test logical shift left immediate for v4i16 +define void @test_pslli_h(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_pslli_h: +; CHECK: # %bb.0: +; CHECK-NEXT: ld a1, 0(a1) +; CHECK-NEXT: pslli.h a1, a1, 2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %a = load <4 x i16>, ptr %a_ptr + %res = shl <4 x i16> %a, splat(i16 2) + store <4 x i16> %res, ptr %ret_ptr + ret void +} + +; Test logical shift left immediate for v8i8 +define void @test_pslli_b(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_pslli_b: +; CHECK: # %bb.0: +; CHECK-NEXT: ld a1, 0(a1) +; CHECK-NEXT: pslli.b a1, a1, 2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %a = load <8 x i8>, ptr %a_ptr + %res = shl <8 x i8> %a, splat(i8 2) + store <8 x i8> %res, ptr %ret_ptr + ret void +} + +; Test logical shift left immediate for v2i32 +define void @test_pslli_w(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_pslli_w: +; CHECK: # %bb.0: +; CHECK-NEXT: ld a1, 0(a1) +; CHECK-NEXT: pslli.w a1, a1, 2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %a = load <2 x i32>, ptr %a_ptr + %res = shl <2 x i32> %a, splat(i32 2) + store <2 x i32> %res, ptr %ret_ptr + ret void +} + +; Test arithmetic saturation shift left immediate for v2i32 +define void @test_psslai_w(ptr %ret_ptr, ptr %a_ptr) { +; CHECK-LABEL: test_psslai_w: +; CHECK: # %bb.0: +; CHECK-NEXT: ld a1, 0(a1) +; CHECK-NEXT: psslai.w a1, a1, 2 +; CHECK-NEXT: sd a1, 0(a0) +; CHECK-NEXT: ret + %a = load <2 x i32>, ptr %a_ptr + %res = call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> %a, <2 x i32> splat(i32 2)) + store <2 x i32> %res, ptr %ret_ptr + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll index 44fd9046fa0e0..ee1d889c3cd15 100644 --- a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll +++ b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll @@ 
-2,11 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+m,+zvfh \ ; RUN: < %s | FileCheck %s -declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(, i64 immarg) -declare @llvm.vector.insert.nxv8i8.v16i8(, <16 x i8>, i64 immarg) -declare @llvm.riscv.vslideup.nxv8i8.i64(, , i64, i64, i64 immarg) -declare @llvm.vector.insert.nxv2i32.v4i32(, <4 x i32>, i64 immarg) - define void @foo( %0) { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll index be4292c9902eb..949a9a3dfc470 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll @@ -328,18 +328,6 @@ define @uabd_non_matching_promotion( %a, %abs } -declare @llvm.abs.nxv16i8(, i1) - -declare @llvm.abs.nxv8i16(, i1) -declare @llvm.abs.nxv16i16(, i1) - -declare @llvm.abs.nxv4i32(, i1) -declare @llvm.abs.nxv8i32(, i1) - -declare @llvm.abs.nxv2i64(, i1) -declare @llvm.abs.nxv4i64(, i1) - -declare @llvm.abs.nxv2i128(, i1) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll index 589b9994651d2..7260cca64a476 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.abs.nxv1i16(, i1) - define @vabs_nxv1i16( %v) { ; CHECK-LABEL: vabs_nxv1i16: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define @vabs_nxv1i16( %v) { ret %r } -declare @llvm.abs.nxv2i16(, i1) - define @vabs_nxv2i16( %v) { ; CHECK-LABEL: vabs_nxv2i16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @vabs_nxv2i16( %v) { ret %r } -declare @llvm.abs.nxv4i16(, i1) - define @vabs_nxv4i16( %v) { ; CHECK-LABEL: vabs_nxv4i16: ; CHECK: # %bb.0: @@ -41,8 +35,6 @@ define @vabs_nxv4i16( %v) { ret %r } -declare @llvm.abs.nxv8i16(, i1) - define @vabs_nxv8i16( %v) { ; CHECK-LABEL: vabs_nxv8i16: ; CHECK: # %bb.0: @@ -54,8 +46,6 @@ define @vabs_nxv8i16( %v) { ret %r } -declare @llvm.abs.nxv16i16(, i1) - define @vabs_nxv16i16( %v) { ; CHECK-LABEL: vabs_nxv16i16: ; CHECK: # %bb.0: @@ -67,8 +57,6 @@ define @vabs_nxv16i16( %v) { ret %r } -declare @llvm.abs.nxv32i16(, i1) - define @vabs_nxv32i16( %v) { ; CHECK-LABEL: vabs_nxv32i16: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define @vabs_nxv32i16( %v) { ret %r } -declare @llvm.abs.nxv1i32(, i1) - define @vabs_nxv1i32( %v) { ; CHECK-LABEL: vabs_nxv1i32: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define @vabs_nxv1i32( %v) { ret %r } -declare @llvm.abs.nxv2i32(, i1) - define @vabs_nxv2i32( %v) { ; CHECK-LABEL: vabs_nxv2i32: ; CHECK: # %bb.0: @@ -106,8 +90,6 @@ define @vabs_nxv2i32( %v) { ret %r } -declare @llvm.abs.nxv4i32(, i1) - define @vabs_nxv4i32( %v) { ; CHECK-LABEL: vabs_nxv4i32: ; CHECK: # %bb.0: @@ -119,8 +101,6 @@ define @vabs_nxv4i32( %v) { ret %r } -declare 
@llvm.abs.nxv8i32(, i1) - define @vabs_nxv8i32( %v) { ; CHECK-LABEL: vabs_nxv8i32: ; CHECK: # %bb.0: @@ -132,8 +112,6 @@ define @vabs_nxv8i32( %v) { ret %r } -declare @llvm.abs.nxv16i32(, i1) - define @vabs_nxv16i32( %v) { ; CHECK-LABEL: vabs_nxv16i32: ; CHECK: # %bb.0: @@ -145,8 +123,6 @@ define @vabs_nxv16i32( %v) { ret %r } -declare @llvm.abs.nxv1i64(, i1) - define @vabs_nxv1i64( %v) { ; CHECK-LABEL: vabs_nxv1i64: ; CHECK: # %bb.0: @@ -158,8 +134,6 @@ define @vabs_nxv1i64( %v) { ret %r } -declare @llvm.abs.nxv2i64(, i1) - define @vabs_nxv2i64( %v) { ; CHECK-LABEL: vabs_nxv2i64: ; CHECK: # %bb.0: @@ -171,8 +145,6 @@ define @vabs_nxv2i64( %v) { ret %r } -declare @llvm.abs.nxv4i64(, i1) - define @vabs_nxv4i64( %v) { ; CHECK-LABEL: vabs_nxv4i64: ; CHECK: # %bb.0: @@ -184,8 +156,6 @@ define @vabs_nxv4i64( %v) { ret %r } -declare @llvm.abs.nxv8i64(, i1) - define @vabs_nxv8i64( %v) { ; CHECK-LABEL: vabs_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll index 2bee8de168d7d..5b215c5173211 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare @llvm.vp.abs.nxv1i8(, i1 immarg, , i32) - define @vp_abs_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i8: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vp_abs_nxv1i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv2i8(, i1 immarg, , i32) - define @vp_abs_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i8: ; CHECK: # %bb.0: @@ -52,8 +48,6 @@ define @vp_abs_nxv2i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv4i8(, i1 immarg, , i32) - define @vp_abs_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i8: ; CHECK: # %bb.0: @@ -76,8 +70,6 @@ define @vp_abs_nxv4i8_unmasked( %va, i32 zero ret %v } -declare 
@llvm.vp.abs.nxv8i8(, i1 immarg, , i32) - define @vp_abs_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i8: ; CHECK: # %bb.0: @@ -100,8 +92,6 @@ define @vp_abs_nxv8i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv16i8(, i1 immarg, , i32) - define @vp_abs_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i8: ; CHECK: # %bb.0: @@ -124,8 +114,6 @@ define @vp_abs_nxv16i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv32i8(, i1 immarg, , i32) - define @vp_abs_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv32i8: ; CHECK: # %bb.0: @@ -148,8 +136,6 @@ define @vp_abs_nxv32i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv64i8(, i1 immarg, , i32) - define @vp_abs_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv64i8: ; CHECK: # %bb.0: @@ -172,8 +158,6 @@ define @vp_abs_nxv64i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv1i16(, i1 immarg, , i32) - define @vp_abs_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i16: ; CHECK: # %bb.0: @@ -196,8 +180,6 @@ define @vp_abs_nxv1i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i16(, i1 immarg, , i32) - define @vp_abs_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i16: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define @vp_abs_nxv2i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i16(, i1 immarg, , i32) - define @vp_abs_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i16: ; CHECK: # %bb.0: @@ -244,8 +224,6 @@ define @vp_abs_nxv4i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i16(, i1 immarg, , i32) - define @vp_abs_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i16: ; CHECK: # %bb.0: @@ -268,8 +246,6 @@ define @vp_abs_nxv8i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i16(, i1 immarg, , i32) - define @vp_abs_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i16: ; CHECK: # %bb.0: @@ -292,8 +268,6 @@ define 
@vp_abs_nxv16i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv32i16(, i1 immarg, , i32) - define @vp_abs_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv32i16: ; CHECK: # %bb.0: @@ -316,8 +290,6 @@ define @vp_abs_nxv32i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv1i32(, i1 immarg, , i32) - define @vp_abs_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i32: ; CHECK: # %bb.0: @@ -340,8 +312,6 @@ define @vp_abs_nxv1i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i32(, i1 immarg, , i32) - define @vp_abs_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i32: ; CHECK: # %bb.0: @@ -364,8 +334,6 @@ define @vp_abs_nxv2i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i32(, i1 immarg, , i32) - define @vp_abs_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i32: ; CHECK: # %bb.0: @@ -388,8 +356,6 @@ define @vp_abs_nxv4i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i32(, i1 immarg, , i32) - define @vp_abs_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i32: ; CHECK: # %bb.0: @@ -412,8 +378,6 @@ define @vp_abs_nxv8i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i32(, i1 immarg, , i32) - define @vp_abs_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i32: ; CHECK: # %bb.0: @@ -436,8 +400,6 @@ define @vp_abs_nxv16i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv1i64(, i1 immarg, , i32) - define @vp_abs_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i64: ; CHECK: # %bb.0: @@ -460,8 +422,6 @@ define @vp_abs_nxv1i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i64(, i1 immarg, , i32) - define @vp_abs_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i64: ; CHECK: # %bb.0: @@ -484,8 +444,6 @@ define @vp_abs_nxv2i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i64(, i1 immarg, , i32) - define @vp_abs_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i64: ; 
CHECK: # %bb.0: @@ -508,8 +466,6 @@ define @vp_abs_nxv4i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv7i64(, i1 immarg, , i32) - define @vp_abs_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv7i64: ; CHECK: # %bb.0: @@ -532,8 +488,6 @@ define @vp_abs_nxv7i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i64(, i1 immarg, , i32) - define @vp_abs_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i64: ; CHECK: # %bb.0: @@ -556,8 +510,6 @@ define @vp_abs_nxv8i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i64(, i1 immarg, , i32) - define @vp_abs_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll index 10156f14252a4..4aaaa88db8b6e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -22,12 +22,6 @@ define @access_fixed_object(ptr %val) { ret %v } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - i64); - define @access_fixed_and_vector_objects(ptr %val) { ; RV64IV-LABEL: access_fixed_and_vector_objects: ; RV64IV: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll index 1acc830347de4..a2bf9b2906a87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll @@ -200,10 +200,3 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) { ret <128 x i1> %mask } - -declare @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64) -declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64) -declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64) -declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64) -declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64, i64) -declare <128 x i1> 
@llvm.get.active.lane.mask.v128i1.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll index cd896c9fa0f08..9b7d9736d9835 100644 --- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll @@ -48,8 +48,3 @@ entry: ret %3 } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( - , - , - , - i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll index 58cfbc331609f..42048b80f98d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll +++ b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll @@ -2,15 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmset.nxv1i1(iXLen); - -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - ; Use unmasked instruction because the mask operand is allone mask define @test0( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: test0: diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll index a75c159339bed..8e33d634a61d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -33,7 +33,6 @@ define @bitreverse_nxv1i8( %va) { %a = call @llvm.bitreverse.nxv1i8( %va) ret %a } -declare @llvm.bitreverse.nxv1i8() define @bitreverse_nxv2i8( %va) { ; CHECK-LABEL: bitreverse_nxv2i8: @@ -64,7 +63,6 @@ define @bitreverse_nxv2i8( %va) { %a = call @llvm.bitreverse.nxv2i8( %va) ret %a } -declare @llvm.bitreverse.nxv2i8() define @bitreverse_nxv4i8( %va) { ; CHECK-LABEL: bitreverse_nxv4i8: @@ -95,7 +93,6 @@ define @bitreverse_nxv4i8( %va) { %a = call 
@llvm.bitreverse.nxv4i8( %va) ret %a } -declare @llvm.bitreverse.nxv4i8() define @bitreverse_nxv8i8( %va) { ; CHECK-LABEL: bitreverse_nxv8i8: @@ -126,7 +123,6 @@ define @bitreverse_nxv8i8( %va) { %a = call @llvm.bitreverse.nxv8i8( %va) ret %a } -declare @llvm.bitreverse.nxv8i8() define @bitreverse_nxv16i8( %va) { ; CHECK-LABEL: bitreverse_nxv16i8: @@ -157,7 +153,6 @@ define @bitreverse_nxv16i8( %va) { %a = call @llvm.bitreverse.nxv16i8( %va) ret %a } -declare @llvm.bitreverse.nxv16i8() define @bitreverse_nxv32i8( %va) { ; CHECK-LABEL: bitreverse_nxv32i8: @@ -188,7 +183,6 @@ define @bitreverse_nxv32i8( %va) { %a = call @llvm.bitreverse.nxv32i8( %va) ret %a } -declare @llvm.bitreverse.nxv32i8() define @bitreverse_nxv64i8( %va) { ; CHECK-LABEL: bitreverse_nxv64i8: @@ -219,7 +213,6 @@ define @bitreverse_nxv64i8( %va) { %a = call @llvm.bitreverse.nxv64i8( %va) ret %a } -declare @llvm.bitreverse.nxv64i8() define @bitreverse_nxv1i16( %va) { ; CHECK-LABEL: bitreverse_nxv1i16: @@ -259,7 +252,6 @@ define @bitreverse_nxv1i16( %va) { %a = call @llvm.bitreverse.nxv1i16( %va) ret %a } -declare @llvm.bitreverse.nxv1i16() define @bitreverse_nxv2i16( %va) { ; CHECK-LABEL: bitreverse_nxv2i16: @@ -299,7 +291,6 @@ define @bitreverse_nxv2i16( %va) { %a = call @llvm.bitreverse.nxv2i16( %va) ret %a } -declare @llvm.bitreverse.nxv2i16() define @bitreverse_nxv4i16( %va) { ; CHECK-LABEL: bitreverse_nxv4i16: @@ -339,7 +330,6 @@ define @bitreverse_nxv4i16( %va) { %a = call @llvm.bitreverse.nxv4i16( %va) ret %a } -declare @llvm.bitreverse.nxv4i16() define @bitreverse_nxv8i16( %va) { ; CHECK-LABEL: bitreverse_nxv8i16: @@ -379,7 +369,6 @@ define @bitreverse_nxv8i16( %va) { %a = call @llvm.bitreverse.nxv8i16( %va) ret %a } -declare @llvm.bitreverse.nxv8i16() define @bitreverse_nxv16i16( %va) { ; CHECK-LABEL: bitreverse_nxv16i16: @@ -419,7 +408,6 @@ define @bitreverse_nxv16i16( %va) { %a = call @llvm.bitreverse.nxv16i16( %va) ret %a } -declare @llvm.bitreverse.nxv16i16() define 
@bitreverse_nxv32i16( %va) { ; CHECK-LABEL: bitreverse_nxv32i16: @@ -459,7 +447,6 @@ define @bitreverse_nxv32i16( %va) { %a = call @llvm.bitreverse.nxv32i16( %va) ret %a } -declare @llvm.bitreverse.nxv32i16() define @bitreverse_nxv1i32( %va) { ; CHECK-LABEL: bitreverse_nxv1i32: @@ -507,7 +494,6 @@ define @bitreverse_nxv1i32( %va) { %a = call @llvm.bitreverse.nxv1i32( %va) ret %a } -declare @llvm.bitreverse.nxv1i32() define @bitreverse_nxv2i32( %va) { ; CHECK-LABEL: bitreverse_nxv2i32: @@ -555,7 +541,6 @@ define @bitreverse_nxv2i32( %va) { %a = call @llvm.bitreverse.nxv2i32( %va) ret %a } -declare @llvm.bitreverse.nxv2i32() define @bitreverse_nxv4i32( %va) { ; CHECK-LABEL: bitreverse_nxv4i32: @@ -603,7 +588,6 @@ define @bitreverse_nxv4i32( %va) { %a = call @llvm.bitreverse.nxv4i32( %va) ret %a } -declare @llvm.bitreverse.nxv4i32() define @bitreverse_nxv8i32( %va) { ; CHECK-LABEL: bitreverse_nxv8i32: @@ -651,7 +635,6 @@ define @bitreverse_nxv8i32( %va) { %a = call @llvm.bitreverse.nxv8i32( %va) ret %a } -declare @llvm.bitreverse.nxv8i32() define @bitreverse_nxv16i32( %va) { ; CHECK-LABEL: bitreverse_nxv16i32: @@ -699,7 +682,6 @@ define @bitreverse_nxv16i32( %va) { %a = call @llvm.bitreverse.nxv16i32( %va) ret %a } -declare @llvm.bitreverse.nxv16i32() define @bitreverse_nxv1i64( %va) { ; RV32-LABEL: bitreverse_nxv1i64: @@ -840,7 +822,6 @@ define @bitreverse_nxv1i64( %va) { %a = call @llvm.bitreverse.nxv1i64( %va) ret %a } -declare @llvm.bitreverse.nxv1i64() define @bitreverse_nxv2i64( %va) { ; RV32-LABEL: bitreverse_nxv2i64: @@ -981,7 +962,6 @@ define @bitreverse_nxv2i64( %va) { %a = call @llvm.bitreverse.nxv2i64( %va) ret %a } -declare @llvm.bitreverse.nxv2i64() define @bitreverse_nxv4i64( %va) { ; RV32-LABEL: bitreverse_nxv4i64: @@ -1122,7 +1102,6 @@ define @bitreverse_nxv4i64( %va) { %a = call @llvm.bitreverse.nxv4i64( %va) ret %a } -declare @llvm.bitreverse.nxv4i64() define @bitreverse_nxv8i64( %va) { ; RV32-LABEL: bitreverse_nxv8i64: @@ -1285,4 +1264,3 @@ define 
@bitreverse_nxv8i64( %va) { %a = call @llvm.bitreverse.nxv8i64( %va) ret %a } -declare @llvm.bitreverse.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll index f704a8ca875ba..09b8fdbf11d26 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.bitreverse.nxv1i8(, , i32) - define @vp_bitreverse_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i8: ; CHECK: # %bb.0: @@ -74,8 +72,6 @@ define @vp_bitreverse_nxv1i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv2i8(, , i32) - define @vp_bitreverse_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i8: ; CHECK: # %bb.0: @@ -140,8 +136,6 @@ define @vp_bitreverse_nxv2i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv4i8(, , i32) - define @vp_bitreverse_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i8: ; CHECK: # %bb.0: @@ -206,8 +200,6 @@ define @vp_bitreverse_nxv4i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv8i8(, , i32) - define @vp_bitreverse_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i8: ; CHECK: # %bb.0: @@ -272,8 +264,6 @@ define @vp_bitreverse_nxv8i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv16i8(, , i32) - define @vp_bitreverse_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i8: ; CHECK: # %bb.0: @@ -338,8 +328,6 @@ define @vp_bitreverse_nxv16i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv32i8(, , i32) - define @vp_bitreverse_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv32i8: ; CHECK: # %bb.0: @@ -404,8 +392,6 @@ define @vp_bitreverse_nxv32i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv64i8(, , i32) - define 
@vp_bitreverse_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i8: ; CHECK: # %bb.0: @@ -470,8 +456,6 @@ define @vp_bitreverse_nxv64i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv1i16(, , i32) - define @vp_bitreverse_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i16: ; CHECK: # %bb.0: @@ -550,8 +534,6 @@ define @vp_bitreverse_nxv1i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i16(, , i32) - define @vp_bitreverse_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i16: ; CHECK: # %bb.0: @@ -630,8 +612,6 @@ define @vp_bitreverse_nxv2i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i16(, , i32) - define @vp_bitreverse_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i16: ; CHECK: # %bb.0: @@ -710,8 +690,6 @@ define @vp_bitreverse_nxv4i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i16(, , i32) - define @vp_bitreverse_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i16: ; CHECK: # %bb.0: @@ -790,8 +768,6 @@ define @vp_bitreverse_nxv8i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv16i16(, , i32) - define @vp_bitreverse_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i16: ; CHECK: # %bb.0: @@ -870,8 +846,6 @@ define @vp_bitreverse_nxv16i16_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv32i16(, , i32) - define @vp_bitreverse_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv32i16: ; CHECK: # %bb.0: @@ -950,8 +924,6 @@ define @vp_bitreverse_nxv32i16_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv1i32(, , i32) - define @vp_bitreverse_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i32: ; CHECK: # %bb.0: @@ -1046,8 +1018,6 @@ define @vp_bitreverse_nxv1i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i32(, , i32) - define @vp_bitreverse_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i32: ; CHECK: # 
%bb.0: @@ -1142,8 +1112,6 @@ define @vp_bitreverse_nxv2i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i32(, , i32) - define @vp_bitreverse_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i32: ; CHECK: # %bb.0: @@ -1238,8 +1206,6 @@ define @vp_bitreverse_nxv4i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i32(, , i32) - define @vp_bitreverse_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i32: ; CHECK: # %bb.0: @@ -1334,8 +1300,6 @@ define @vp_bitreverse_nxv8i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv16i32(, , i32) - define @vp_bitreverse_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i32: ; CHECK: # %bb.0: @@ -1430,8 +1394,6 @@ define @vp_bitreverse_nxv16i32_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv1i64(, , i32) - define @vp_bitreverse_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv1i64: ; RV32: # %bb.0: @@ -1712,8 +1674,6 @@ define @vp_bitreverse_nxv1i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i64(, , i32) - define @vp_bitreverse_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv2i64: ; RV32: # %bb.0: @@ -1994,8 +1954,6 @@ define @vp_bitreverse_nxv2i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i64(, , i32) - define @vp_bitreverse_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv4i64: ; RV32: # %bb.0: @@ -2276,8 +2234,6 @@ define @vp_bitreverse_nxv4i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv7i64(, , i32) - define @vp_bitreverse_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv7i64: ; RV32: # %bb.0: @@ -2663,8 +2619,6 @@ define @vp_bitreverse_nxv7i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i64(, , i32) - define @vp_bitreverse_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv8i64: ; RV32: # %bb.0: @@ -3051,7 +3005,6 @@ define @vp_bitreverse_nxv8i64_unmasked( %va } ; Test splitting. 
Use i16 version for easier check. -declare @llvm.vp.bitreverse.nxv64i16(, , i32) define @vp_bitreverse_nxv64i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i16: @@ -3224,7 +3177,6 @@ define @vp_bitreverse_nxv64i16_unmasked( } ; Test promotion. -declare @llvm.vp.bitreverse.nxv1i9(, , i32) define @vp_bitreverse_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll index b8521c37e4906..51a72d3f435b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -21,7 +21,6 @@ define @bswap_nxv1i16( %va) { %a = call @llvm.bswap.nxv1i16( %va) ret %a } -declare @llvm.bswap.nxv1i16() define @bswap_nxv2i16( %va) { ; CHECK-LABEL: bswap_nxv2i16: @@ -40,7 +39,6 @@ define @bswap_nxv2i16( %va) { %a = call @llvm.bswap.nxv2i16( %va) ret %a } -declare @llvm.bswap.nxv2i16() define @bswap_nxv4i16( %va) { ; CHECK-LABEL: bswap_nxv4i16: @@ -59,7 +57,6 @@ define @bswap_nxv4i16( %va) { %a = call @llvm.bswap.nxv4i16( %va) ret %a } -declare @llvm.bswap.nxv4i16() define @bswap_nxv8i16( %va) { ; CHECK-LABEL: bswap_nxv8i16: @@ -78,7 +75,6 @@ define @bswap_nxv8i16( %va) { %a = call @llvm.bswap.nxv8i16( %va) ret %a } -declare @llvm.bswap.nxv8i16() define @bswap_nxv16i16( %va) { ; CHECK-LABEL: bswap_nxv16i16: @@ -97,7 +93,6 @@ define @bswap_nxv16i16( %va) { %a = call @llvm.bswap.nxv16i16( %va) ret %a } -declare @llvm.bswap.nxv16i16() define @bswap_nxv32i16( %va) { ; CHECK-LABEL: bswap_nxv32i16: @@ -116,7 +111,6 @@ define @bswap_nxv32i16( %va) { %a = call @llvm.bswap.nxv32i16( %va) ret %a } -declare @llvm.bswap.nxv32i16() define @bswap_nxv1i32( %va) { ; CHECK-LABEL: bswap_nxv1i32: @@ -143,7 +137,6 @@ define @bswap_nxv1i32( %va) { %a = call @llvm.bswap.nxv1i32( %va) ret %a } -declare @llvm.bswap.nxv1i32() define @bswap_nxv2i32( %va) { ; CHECK-LABEL: bswap_nxv2i32: @@ -170,7 +163,6 @@ define 
@bswap_nxv2i32( %va) { %a = call @llvm.bswap.nxv2i32( %va) ret %a } -declare @llvm.bswap.nxv2i32() define @bswap_nxv4i32( %va) { ; CHECK-LABEL: bswap_nxv4i32: @@ -197,7 +189,6 @@ define @bswap_nxv4i32( %va) { %a = call @llvm.bswap.nxv4i32( %va) ret %a } -declare @llvm.bswap.nxv4i32() define @bswap_nxv8i32( %va) { ; CHECK-LABEL: bswap_nxv8i32: @@ -224,7 +215,6 @@ define @bswap_nxv8i32( %va) { %a = call @llvm.bswap.nxv8i32( %va) ret %a } -declare @llvm.bswap.nxv8i32() define @bswap_nxv16i32( %va) { ; CHECK-LABEL: bswap_nxv16i32: @@ -251,7 +241,6 @@ define @bswap_nxv16i32( %va) { %a = call @llvm.bswap.nxv16i32( %va) ret %a } -declare @llvm.bswap.nxv16i32() define @bswap_nxv1i64( %va) { ; RV32-LABEL: bswap_nxv1i64: @@ -335,7 +324,6 @@ define @bswap_nxv1i64( %va) { %a = call @llvm.bswap.nxv1i64( %va) ret %a } -declare @llvm.bswap.nxv1i64() define @bswap_nxv2i64( %va) { ; RV32-LABEL: bswap_nxv2i64: @@ -419,7 +407,6 @@ define @bswap_nxv2i64( %va) { %a = call @llvm.bswap.nxv2i64( %va) ret %a } -declare @llvm.bswap.nxv2i64() define @bswap_nxv4i64( %va) { ; RV32-LABEL: bswap_nxv4i64: @@ -503,7 +490,6 @@ define @bswap_nxv4i64( %va) { %a = call @llvm.bswap.nxv4i64( %va) ret %a } -declare @llvm.bswap.nxv4i64() define @bswap_nxv8i64( %va) { ; RV32-LABEL: bswap_nxv8i64: @@ -609,4 +595,3 @@ define @bswap_nxv8i64( %va) { %a = call @llvm.bswap.nxv8i64( %va) ret %a } -declare @llvm.bswap.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll index 3d31cf80cdd3a..0177b8cfd4393 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare @llvm.vp.bswap.nxv1i16(, , i32) - define @vp_bswap_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv1i16: ; CHECK: # %bb.0: @@ -46,8 +44,6 @@ define @vp_bswap_nxv1i16_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.bswap.nxv2i16(, , i32) - define @vp_bswap_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv2i16: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vp_bswap_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i16(, , i32) - define @vp_bswap_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv4i16: ; CHECK: # %bb.0: @@ -122,8 +116,6 @@ define @vp_bswap_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i16(, , i32) - define @vp_bswap_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv8i16: ; CHECK: # %bb.0: @@ -160,8 +152,6 @@ define @vp_bswap_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv16i16(, , i32) - define @vp_bswap_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv16i16: ; CHECK: # %bb.0: @@ -198,8 +188,6 @@ define @vp_bswap_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv32i16(, , i32) - define @vp_bswap_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv32i16: ; CHECK: # %bb.0: @@ -236,8 +224,6 @@ define @vp_bswap_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv1i32(, , i32) - define @vp_bswap_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv1i32: ; CHECK: # %bb.0: @@ -290,8 +276,6 @@ define @vp_bswap_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv2i32(, , i32) - define @vp_bswap_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv2i32: ; CHECK: # %bb.0: @@ -344,8 +328,6 @@ define @vp_bswap_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i32(, , i32) - define @vp_bswap_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv4i32: ; CHECK: # %bb.0: @@ -398,8 +380,6 @@ define @vp_bswap_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i32(, , i32) - define @vp_bswap_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv8i32: ; CHECK: # %bb.0: @@ -452,8 +432,6 @@ define @vp_bswap_nxv8i32_unmasked( %va, i32 
ret %v } -declare @llvm.vp.bswap.nxv16i32(, , i32) - define @vp_bswap_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv16i32: ; CHECK: # %bb.0: @@ -506,8 +484,6 @@ define @vp_bswap_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv1i64(, , i32) - define @vp_bswap_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv1i64: ; RV32: # %bb.0: @@ -674,8 +650,6 @@ define @vp_bswap_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv2i64(, , i32) - define @vp_bswap_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv2i64: ; RV32: # %bb.0: @@ -842,8 +816,6 @@ define @vp_bswap_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i64(, , i32) - define @vp_bswap_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv4i64: ; RV32: # %bb.0: @@ -1010,8 +982,6 @@ define @vp_bswap_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv7i64(, , i32) - define @vp_bswap_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv7i64: ; RV32: # %bb.0: @@ -1282,8 +1252,6 @@ define @vp_bswap_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i64(, , i32) - define @vp_bswap_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv8i64: ; RV32: # %bb.0: @@ -1555,7 +1523,6 @@ define @vp_bswap_nxv8i64_unmasked( %va, i32 } ; Test splitting. Use i16 version for easier check. -declare @llvm.vp.bswap.nxv64i16(, , i32) define @vp_bswap_nxv64i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv64i16: @@ -1656,7 +1623,6 @@ define @vp_bswap_nxv64i16_unmasked( %va, } ; Test promotion. 
-declare @llvm.vp.bswap.nxv1i48(, , i32) define @vp_bswap_nxv1i48( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv1i48: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll index 6507349f45a2f..6c7709f52e30b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFH -declare @llvm.vp.ceil.nxv1bf16(, , i32) - define @vp_ceil_vv_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_ceil_vv_nxv1bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv2bf16(, , i32) - define @vp_ceil_vv_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_ceil_vv_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv4bf16(, , i32) - define @vp_ceil_vv_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_ceil_vv_nxv4bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv8bf16(, , i32) - define @vp_ceil_vv_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_ceil_vv_nxv8bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv16bf16(, , i32) - define @vp_ceil_vv_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_ceil_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.ceil.nxv32bf16(, , i32) - define @vp_ceil_vv_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_ceil_vv_nxv32bf16_unmasked( @llvm.vp.ceil.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.ceil.nxv1f16(, , i32) define @vp_ceil_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vp_ceil_vv_nxv1f16: @@ -490,8 +477,6 @@ define @vp_ceil_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv2f16(, , i32) - define @vp_ceil_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_ceil_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv4f16(, , i32) - define @vp_ceil_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_ceil_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv8f16(, , i32) - define @vp_ceil_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_ceil_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv16f16(, , i32) - define @vp_ceil_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_ceil_vv_nxv16f16_unmasked( % ret %v } -declare @llvm.vp.ceil.nxv32f16(, , i32) - define @vp_ceil_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_ceil_vv_nxv32f16_unmasked( % ret %v } -declare @llvm.vp.ceil.nxv1f32(, , i32) - define @vp_ceil_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_ceil_vv_nxv1f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv2f32(, , i32) - define @vp_ceil_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_ceil_vv_nxv2f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv4f32(, , i32) - define @vp_ceil_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_ceil_vv_nxv4f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv8f32(, , i32) - define @vp_ceil_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_ceil_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_ceil_vv_nxv8f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv16f32(, , i32) - define @vp_ceil_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_ceil_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.ceil.nxv1f64(, , i32) - define @vp_ceil_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64: ; RV32ZFH: # %bb.0: @@ -1295,8 +1260,6 @@ define @vp_ceil_vv_nxv1f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv2f64(, , i32) - define @vp_ceil_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64: ; RV32ZFH: # %bb.0: @@ -1375,8 +1338,6 @@ define @vp_ceil_vv_nxv2f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv4f64(, , i32) - define @vp_ceil_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64: ; RV32ZFH: # %bb.0: @@ -1455,8 +1416,6 @@ define @vp_ceil_vv_nxv4f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv7f64(, , i32) - define @vp_ceil_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64: ; RV32ZFH: # %bb.0: @@ -1535,8 +1494,6 @@ define @vp_ceil_vv_nxv7f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv8f64(, , i32) - define @vp_ceil_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64: ; RV32ZFH: # %bb.0: @@ -1616,7 +1573,6 @@ define @vp_ceil_vv_nxv8f64_unmasked( } ; Test splitting. 
-declare @llvm.vp.ceil.nxv16f64(, , i32) define @vp_ceil_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll index 8f917becafec0..4237a6bcc2ee5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll @@ -276,10 +276,6 @@ define @vselect_add_const_2_nxv2i64( %a0) { ret %v2 } -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>) -declare @llvm.umin.nxv2i64(, ) -declare @llvm.umax.nxv2i64(, ) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll index 496755738e6fa..88411e49ab5ed 100644 --- a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll @@ -68,8 +68,6 @@ entry: ret void } -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) - !0 = !{!1, !1, i64 0} !1 = !{!"int", !2, i64 0} !2 = !{!"omnipotent char", !3, i64 0} diff --git a/llvm/test/CodeGen/RISCV/rvv/commutable.ll b/llvm/test/CodeGen/RISCV/rvv/commutable.ll index 05713bc2bb083..a59fde05866d5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/commutable.ll +++ b/llvm/test/CodeGen/RISCV/rvv/commutable.ll @@ -5,7 +5,6 @@ ; RUN: -verify-machineinstrs | FileCheck %s ; vadd.vv -declare @llvm.riscv.vadd.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vadd_vv: ; CHECK: # %bb.0: # %entry @@ -21,7 +20,6 @@ entry: ret %ret } -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vadd_vv_masked: ; CHECK: # %bb.0: @@ -37,7 +35,6 @@ define 
@commutable_vadd_vv_masked( %0, @llvm.riscv.vand.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vand_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vand_vv: ; CHECK: # %bb.0: # %entry @@ -53,7 +50,6 @@ entry: ret %ret } -declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vand_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vand_vv_masked: ; CHECK: # %bb.0: @@ -69,7 +65,6 @@ define @commutable_vand_vv_masked( %0, @llvm.riscv.vor.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vor_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vor_vv: ; CHECK: # %bb.0: # %entry @@ -85,7 +80,6 @@ entry: ret %ret } -declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vor_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vor_vv_masked: ; CHECK: # %bb.0: @@ -101,7 +95,6 @@ define @commutable_vor_vv_masked( %0, @llvm.riscv.vxor.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vxor_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vxor_vv: ; CHECK: # %bb.0: # %entry @@ -117,7 +110,6 @@ entry: ret %ret } -declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vxor_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vxor_vv_masked: ; CHECK: # %bb.0: @@ -133,7 +125,6 @@ define @commutable_vxor_vv_masked( %0, @llvm.riscv.vmseq.nxv1i64(, , iXLen); define @commutable_vmseq_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmseq_vv: ; CHECK: # %bb.0: # %entry @@ -149,7 +140,6 @@ entry: ret %ret } -declare @llvm.riscv.vmseq.mask.nxv1i64(, , , , iXLen); define @commutable_vmseq_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmseq_vv_masked: ; CHECK: # %bb.0: @@ -165,7 +155,6 @@ define @commutable_vmseq_vv_masked( %0, @llvm.riscv.vmsne.nxv1i64(, , iXLen); define @commutable_vmsne_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmsne_vv: ; CHECK: # %bb.0: # %entry @@ -181,7 +170,6 @@ 
entry: ret %ret } -declare @llvm.riscv.vmsne.mask.nxv1i64(, , , , iXLen); define @commutable_vmsne_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmsne_vv_masked: ; CHECK: # %bb.0: @@ -197,7 +185,6 @@ define @commutable_vmsne_vv_masked( %0, @llvm.riscv.vmin.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmin_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmin_vv: ; CHECK: # %bb.0: # %entry @@ -213,7 +200,6 @@ entry: ret %ret } -declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmin_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmin_vv_masked: ; CHECK: # %bb.0: @@ -229,7 +215,6 @@ define @commutable_vmin_vv_masked( %0, @llvm.riscv.vminu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vminu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vminu_vv: ; CHECK: # %bb.0: # %entry @@ -245,7 +230,6 @@ entry: ret %ret } -declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vminu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vminu_vv_masked: ; CHECK: # %bb.0: @@ -261,7 +245,6 @@ define @commutable_vminu_vv_masked( %0, @llvm.riscv.vmax.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmax_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmax_vv: ; CHECK: # %bb.0: # %entry @@ -277,7 +260,6 @@ entry: ret %ret } -declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmax_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmax_vv_masked: ; CHECK: # %bb.0: @@ -293,7 +275,6 @@ define @commutable_vmax_vv_masked( %0, @llvm.riscv.vmaxu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmaxu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmaxu_vv: ; CHECK: # %bb.0: # %entry @@ -309,7 +290,6 @@ entry: ret %ret } -declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmaxu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: 
commutable_vmaxu_vv_masked: ; CHECK: # %bb.0: @@ -325,7 +305,6 @@ define @commutable_vmaxu_vv_masked( %0, @llvm.riscv.vmul.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmul_vv: ; CHECK: # %bb.0: # %entry @@ -341,7 +320,6 @@ entry: ret %ret } -declare @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmul_vv_masked: ; CHECK: # %bb.0: @@ -357,7 +335,6 @@ define @commutable_vmul_vv_masked( %0, @llvm.riscv.vmulh.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmulh_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmulh_vv: ; CHECK: # %bb.0: # %entry @@ -373,7 +350,6 @@ entry: ret %ret } -declare @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmulh_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmulh_vv_masked: ; CHECK: # %bb.0: @@ -389,7 +365,6 @@ define @commutable_vmulh_vv_masked( %0, @llvm.riscv.vmulhu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmulhu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmulhu_vv: ; CHECK: # %bb.0: # %entry @@ -405,7 +380,6 @@ entry: ret %ret } -declare @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmulhu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmulhu_vv_masked: ; CHECK: # %bb.0: @@ -421,7 +395,6 @@ define @commutable_vmulhu_vv_masked( %0, @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwadd_vv: ; CHECK: # %bb.0: # %entry @@ -437,7 +410,6 @@ entry: ret %ret } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwadd_vv_masked: ; CHECK: # %bb.0: @@ -453,7 +425,6 @@ define @commutable_vwadd_vv_masked( %0, 
@llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwaddu_vv: ; CHECK: # %bb.0: # %entry @@ -469,7 +440,6 @@ entry: ret %ret } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwaddu_vv_masked: ; CHECK: # %bb.0: @@ -485,7 +455,6 @@ define @commutable_vwaddu_vv_masked( %0, @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmul_vv: ; CHECK: # %bb.0: # %entry @@ -501,7 +470,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmul_vv_masked: ; CHECK: # %bb.0: @@ -517,7 +485,6 @@ define @commutable_vwmul_vv_masked( %0, @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwmulu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmulu_vv: ; CHECK: # %bb.0: # %entry @@ -533,7 +500,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmulu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmulu_vv_masked: ; CHECK: # %bb.0: @@ -549,7 +515,6 @@ define @commutable_vwmulu_vv_masked( %0, @llvm.riscv.vwmacc.nxv1i64.nxv1i32(, , , iXLen, iXLen); define @commutable_vwmacc_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmacc_vv: ; CHECK: # %bb.0: # %entry @@ -565,7 +530,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmacc_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmacc_vv_masked: ; CHECK: # %bb.0: @@ -582,7 +546,6 @@ define @commutable_vwmacc_vv_masked( %0, @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(, , , iXLen, iXLen); define 
@commutable_vwmaccu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmaccu_vv: ; CHECK: # %bb.0: # %entry @@ -598,7 +561,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmaccu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmaccu_vv_masked: ; CHECK: # %bb.0: @@ -615,7 +577,6 @@ define @commutable_vwmaccu_vv_masked( %0, < } ; vadc.vvm -declare @llvm.riscv.vadc.nxv1i64.nxv1i64(, , , , iXLen); define @commutable_vadc_vv( %0, %1, %mask, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vadc_vv: ; CHECK: # %bb.0: # %entry @@ -632,7 +593,6 @@ entry: } ; vsadd.vv -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vsadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsadd_vv: ; CHECK: # %bb.0: # %entry @@ -648,7 +608,6 @@ entry: ret %ret } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vsadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsadd_vv_masked: ; CHECK: # %bb.0: @@ -664,7 +623,6 @@ define @commutable_vsadd_vv_masked( %0, @llvm.riscv.vsaddu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vsaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsaddu_vv: ; CHECK: # %bb.0: # %entry @@ -680,7 +638,6 @@ entry: ret %ret } -declare @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vsaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsaddu_vv_masked: ; CHECK: # %bb.0: @@ -696,7 +653,6 @@ define @commutable_vsaddu_vv_masked( %0, @llvm.riscv.vaadd.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vaadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vaadd_vv: ; CHECK: # %bb.0: # %entry @@ -713,7 +669,6 @@ entry: ret %ret } -declare @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vaadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vaadd_vv_masked: ; 
CHECK: # %bb.0: @@ -730,7 +685,6 @@ define @commutable_vaadd_vv_masked( %0, @llvm.riscv.vaaddu.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vaaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vaaddu_vv: ; CHECK: # %bb.0: # %entry @@ -747,7 +701,6 @@ entry: ret %ret } -declare @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vaaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vaaddu_vv_masked: ; CHECK: # %bb.0: @@ -764,7 +717,6 @@ define @commutable_vaaddu_vv_masked( %0, @llvm.riscv.vsmul.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vsmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsmul_vv: ; CHECK: # %bb.0: # %entry @@ -781,7 +733,6 @@ entry: ret %ret } -declare @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vsmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsmul_vv_masked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll index 69822e9d9d2e3..4b6a115ade642 100644 --- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll @@ -836,36 +836,3 @@ entry: ret void } -declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i8(<16 x i8>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i8(<32 x i8>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i8(<64 x i8>, ptr, <64 x i1>) -declare void @llvm.masked.compressstore.v128i8(<128 x i8>, ptr, <128 x i1>) -declare void @llvm.masked.compressstore.v256i8(<256 x i8>, ptr, <256 x i1>) - -declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>) 
-declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i16(<16 x i16>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i16(<32 x i16>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i16(<64 x i16>, ptr, <64 x i1>) -declare void @llvm.masked.compressstore.v128i16(<128 x i16>, ptr, <128 x i1>) - -declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i32(<16 x i32>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i32(<32 x i32>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i32(<64 x i32>, ptr, <64 x i1>) - -declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i64(<16 x i64>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i64(<32 x i64>, ptr, <32 x i1>) diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll index 593f8e2612fec..3248f3f34eedf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll @@ -77,4 +77,3 @@ entry: ret void } -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>) diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll index 
319d82f724ca7..1f45e45f23164 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -69,7 +69,6 @@ define @ctlz_nxv1i8( %va) { %a = call @llvm.ctlz.nxv1i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i8(, i1) define @ctlz_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i8: @@ -132,7 +131,6 @@ define @ctlz_nxv2i8( %va) { %a = call @llvm.ctlz.nxv2i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i8(, i1) define @ctlz_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i8: @@ -195,7 +193,6 @@ define @ctlz_nxv4i8( %va) { %a = call @llvm.ctlz.nxv4i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i8(, i1) define @ctlz_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i8: @@ -258,7 +255,6 @@ define @ctlz_nxv8i8( %va) { %a = call @llvm.ctlz.nxv8i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i8(, i1) define @ctlz_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i8: @@ -321,7 +317,6 @@ define @ctlz_nxv16i8( %va) { %a = call @llvm.ctlz.nxv16i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i8(, i1) define @ctlz_nxv32i8( %va) { ; CHECK-LABEL: ctlz_nxv32i8: @@ -356,7 +351,6 @@ define @ctlz_nxv32i8( %va) { %a = call @llvm.ctlz.nxv32i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv32i8(, i1) define @ctlz_nxv64i8( %va) { ; CHECK-LABEL: ctlz_nxv64i8: @@ -391,7 +385,6 @@ define @ctlz_nxv64i8( %va) { %a = call @llvm.ctlz.nxv64i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv64i8(, i1) define @ctlz_nxv1i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i16: @@ -457,7 +450,6 @@ define @ctlz_nxv1i16( %va) { %a = call @llvm.ctlz.nxv1i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i16(, i1) define @ctlz_nxv2i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i16: @@ -523,7 +515,6 @@ define @ctlz_nxv2i16( %va) { %a = call @llvm.ctlz.nxv2i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i16(, i1) define @ctlz_nxv4i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i16: @@ -589,7 +580,6 @@ define @ctlz_nxv4i16( %va) { %a = call @llvm.ctlz.nxv4i16( 
%va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i16(, i1) define @ctlz_nxv8i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i16: @@ -655,7 +645,6 @@ define @ctlz_nxv8i16( %va) { %a = call @llvm.ctlz.nxv8i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i16(, i1) define @ctlz_nxv16i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i16: @@ -721,7 +710,6 @@ define @ctlz_nxv16i16( %va) { %a = call @llvm.ctlz.nxv16i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i16(, i1) define @ctlz_nxv32i16( %va) { ; CHECK-LABEL: ctlz_nxv32i16: @@ -765,7 +753,6 @@ define @ctlz_nxv32i16( %va) { %a = call @llvm.ctlz.nxv32i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv32i16(, i1) define @ctlz_nxv1i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i32: @@ -837,7 +824,6 @@ define @ctlz_nxv1i32( %va) { %a = call @llvm.ctlz.nxv1i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i32(, i1) define @ctlz_nxv2i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i32: @@ -909,7 +895,6 @@ define @ctlz_nxv2i32( %va) { %a = call @llvm.ctlz.nxv2i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i32(, i1) define @ctlz_nxv4i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i32: @@ -981,7 +966,6 @@ define @ctlz_nxv4i32( %va) { %a = call @llvm.ctlz.nxv4i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i32(, i1) define @ctlz_nxv8i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i32: @@ -1053,7 +1037,6 @@ define @ctlz_nxv8i32( %va) { %a = call @llvm.ctlz.nxv8i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i32(, i1) define @ctlz_nxv16i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i32: @@ -1126,7 +1109,6 @@ define @ctlz_nxv16i32( %va) { %a = call @llvm.ctlz.nxv16i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i32(, i1) define @ctlz_nxv1i64( %va) { ; RV32I-LABEL: ctlz_nxv1i64: @@ -1266,7 +1248,6 @@ define @ctlz_nxv1i64( %va) { %a = call @llvm.ctlz.nxv1i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i64(, i1) define @ctlz_nxv2i64( %va) { ; RV32I-LABEL: ctlz_nxv2i64: @@ -1406,7 +1387,6 @@ define @ctlz_nxv2i64( %va) { %a = 
call @llvm.ctlz.nxv2i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i64(, i1) define @ctlz_nxv4i64( %va) { ; RV32I-LABEL: ctlz_nxv4i64: @@ -1546,7 +1526,6 @@ define @ctlz_nxv4i64( %va) { %a = call @llvm.ctlz.nxv4i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i64(, i1) define @ctlz_nxv8i64( %va) { ; RV32I-LABEL: ctlz_nxv8i64: @@ -1686,7 +1665,6 @@ define @ctlz_nxv8i64( %va) { %a = call @llvm.ctlz.nxv8i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i64(, i1) define @ctlz_zero_undef_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv1i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll index 570ff34b0f23a..20f397b694180 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.ctlz.nxv1i8(, i1 immarg, , i32) - define @vp_ctlz_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i8: ; CHECK: # %bb.0: @@ -61,8 +59,6 @@ define @vp_ctlz_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv2i8(, i1 immarg, , i32) - define @vp_ctlz_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +110,6 @@ define @vp_ctlz_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv4i8(, i1 immarg, , i32) - define @vp_ctlz_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i8: ; CHECK: # %bb.0: @@ -167,8 +161,6 @@ define @vp_ctlz_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv8i8(, i1 immarg, , i32) - define @vp_ctlz_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define @vp_ctlz_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv16i8(, i1 immarg, , i32) - define @vp_ctlz_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i8: 
; CHECK: # %bb.0: @@ -273,8 +263,6 @@ define @vp_ctlz_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv32i8(, i1 immarg, , i32) - define @vp_ctlz_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv32i8: ; CHECK: # %bb.0: @@ -343,8 +331,6 @@ define @vp_ctlz_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv64i8(, i1 immarg, , i32) - define @vp_ctlz_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv64i8: ; CHECK: # %bb.0: @@ -413,8 +399,6 @@ define @vp_ctlz_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv1i16(, i1 immarg, , i32) - define @vp_ctlz_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i16: ; CHECK: # %bb.0: @@ -460,8 +444,6 @@ define @vp_ctlz_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i16(, i1 immarg, , i32) - define @vp_ctlz_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i16: ; CHECK: # %bb.0: @@ -507,8 +489,6 @@ define @vp_ctlz_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i16(, i1 immarg, , i32) - define @vp_ctlz_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i16: ; CHECK: # %bb.0: @@ -554,8 +534,6 @@ define @vp_ctlz_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv8i16(, i1 immarg, , i32) - define @vp_ctlz_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i16: ; CHECK: # %bb.0: @@ -601,8 +579,6 @@ define @vp_ctlz_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i16(, i1 immarg, , i32) - define @vp_ctlz_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i16: ; CHECK: # %bb.0: @@ -648,8 +624,6 @@ define @vp_ctlz_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv32i16(, i1 immarg, , i32) - define @vp_ctlz_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv32i16: ; CHECK: # %bb.0: @@ -736,8 +710,6 @@ define @vp_ctlz_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv1i32(, i1 immarg, , i32) - define 
@vp_ctlz_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i32: ; CHECK: # %bb.0: @@ -785,8 +757,6 @@ define @vp_ctlz_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i32(, i1 immarg, , i32) - define @vp_ctlz_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i32: ; CHECK: # %bb.0: @@ -834,8 +804,6 @@ define @vp_ctlz_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i32(, i1 immarg, , i32) - define @vp_ctlz_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i32: ; CHECK: # %bb.0: @@ -883,8 +851,6 @@ define @vp_ctlz_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv8i32(, i1 immarg, , i32) - define @vp_ctlz_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i32: ; CHECK: # %bb.0: @@ -932,8 +898,6 @@ define @vp_ctlz_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i32(, i1 immarg, , i32) - define @vp_ctlz_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i32: ; CHECK: # %bb.0: @@ -980,8 +944,6 @@ define @vp_ctlz_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv1i64(, i1 immarg, , i32) - define @vp_ctlz_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i64: ; CHECK: # %bb.0: @@ -1030,8 +992,6 @@ define @vp_ctlz_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i64(, i1 immarg, , i32) - define @vp_ctlz_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i64: ; CHECK: # %bb.0: @@ -1080,8 +1040,6 @@ define @vp_ctlz_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i64(, i1 immarg, , i32) - define @vp_ctlz_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i64: ; CHECK: # %bb.0: @@ -1130,8 +1088,6 @@ define @vp_ctlz_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv7i64(, i1 immarg, , i32) - define @vp_ctlz_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv7i64: ; CHECK: # %bb.0: @@ -1180,8 +1136,6 @@ define @vp_ctlz_nxv7i64_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.ctlz.nxv8i64(, i1 immarg, , i32) - define @vp_ctlz_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i64: ; CHECK: # %bb.0: @@ -1230,8 +1184,6 @@ define @vp_ctlz_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i64(, i1 immarg, , i32) - define @vp_ctlz_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i64: ; CHECK: # %bb.0: @@ -1391,7 +1343,6 @@ define @vp_ctlz_zero_undef_nxv1i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i8: ; CHECK: # %bb.0: @@ -1439,7 +1390,6 @@ define @vp_ctlz_zero_undef_nxv2i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i8: ; CHECK: # %bb.0: @@ -1487,7 +1437,6 @@ define @vp_ctlz_zero_undef_nxv4i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i8: ; CHECK: # %bb.0: @@ -1535,7 +1484,6 @@ define @vp_ctlz_zero_undef_nxv8i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i8: ; CHECK: # %bb.0: @@ -1583,7 +1531,6 @@ define @vp_ctlz_zero_undef_nxv16i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i8: ; CHECK: # %bb.0: @@ -1652,7 +1599,6 @@ define @vp_ctlz_zero_undef_nxv32i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv64i8: ; CHECK: # %bb.0: @@ -1721,7 +1667,6 @@ define @vp_ctlz_zero_undef_nxv64i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i16: ; CHECK: # %bb.0: @@ -1763,7 +1708,6 @@ define @vp_ctlz_zero_undef_nxv1i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i16: 
; CHECK: # %bb.0: @@ -1805,7 +1749,6 @@ define @vp_ctlz_zero_undef_nxv2i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i16: ; CHECK: # %bb.0: @@ -1847,7 +1790,6 @@ define @vp_ctlz_zero_undef_nxv4i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i16: ; CHECK: # %bb.0: @@ -1889,7 +1831,6 @@ define @vp_ctlz_zero_undef_nxv8i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i16: ; CHECK: # %bb.0: @@ -1931,7 +1872,6 @@ define @vp_ctlz_zero_undef_nxv16i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i16: ; CHECK: # %bb.0: @@ -2018,7 +1958,6 @@ define @vp_ctlz_zero_undef_nxv32i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i32: ; CHECK: # %bb.0: @@ -2062,7 +2001,6 @@ define @vp_ctlz_zero_undef_nxv1i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i32: ; CHECK: # %bb.0: @@ -2106,7 +2044,6 @@ define @vp_ctlz_zero_undef_nxv2i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i32: ; CHECK: # %bb.0: @@ -2150,7 +2087,6 @@ define @vp_ctlz_zero_undef_nxv4i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i32: ; CHECK: # %bb.0: @@ -2194,7 +2130,6 @@ define @vp_ctlz_zero_undef_nxv8i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i32: ; CHECK: # %bb.0: @@ -2237,7 +2172,6 @@ define @vp_ctlz_zero_undef_nxv16i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_ctlz_zero_undef_nxv1i64: ; CHECK: # %bb.0: @@ -2282,7 +2216,6 @@ define @vp_ctlz_zero_undef_nxv1i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i64: ; CHECK: # %bb.0: @@ -2327,7 +2260,6 @@ define @vp_ctlz_zero_undef_nxv2i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i64: ; CHECK: # %bb.0: @@ -2372,7 +2304,6 @@ define @vp_ctlz_zero_undef_nxv4i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv7i64: ; CHECK: # %bb.0: @@ -2417,7 +2348,6 @@ define @vp_ctlz_zero_undef_nxv7i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i64: ; CHECK: # %bb.0: @@ -2569,7 +2499,6 @@ define @vp_ctlz_zero_undef_nxv16i64_unmasked( @llvm.vp.ctlz.nxv1i9(, i1 immarg, , i32) define @vp_ctlz_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll index 1018130b472d1..d137ad54193a5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -30,7 +30,6 @@ define @ctpop_nxv1i8( %va) { %a = call @llvm.ctpop.nxv1i8( %va) ret %a } -declare @llvm.ctpop.nxv1i8() define @ctpop_nxv2i8( %va) { ; CHECK-LABEL: ctpop_nxv2i8: @@ -58,7 +57,6 @@ define @ctpop_nxv2i8( %va) { %a = call @llvm.ctpop.nxv2i8( %va) ret %a } -declare @llvm.ctpop.nxv2i8() define @ctpop_nxv4i8( %va) { ; CHECK-LABEL: ctpop_nxv4i8: @@ -86,7 +84,6 @@ define @ctpop_nxv4i8( %va) { %a = call @llvm.ctpop.nxv4i8( %va) ret %a } -declare @llvm.ctpop.nxv4i8() define @ctpop_nxv8i8( %va) { ; CHECK-LABEL: ctpop_nxv8i8: @@ -114,7 +111,6 @@ define @ctpop_nxv8i8( %va) { %a = call @llvm.ctpop.nxv8i8( %va) ret %a } -declare @llvm.ctpop.nxv8i8() define @ctpop_nxv16i8( %va) { ; 
CHECK-LABEL: ctpop_nxv16i8: @@ -142,7 +138,6 @@ define @ctpop_nxv16i8( %va) { %a = call @llvm.ctpop.nxv16i8( %va) ret %a } -declare @llvm.ctpop.nxv16i8() define @ctpop_nxv32i8( %va) { ; CHECK-LABEL: ctpop_nxv32i8: @@ -170,7 +165,6 @@ define @ctpop_nxv32i8( %va) { %a = call @llvm.ctpop.nxv32i8( %va) ret %a } -declare @llvm.ctpop.nxv32i8() define @ctpop_nxv64i8( %va) { ; CHECK-LABEL: ctpop_nxv64i8: @@ -198,7 +192,6 @@ define @ctpop_nxv64i8( %va) { %a = call @llvm.ctpop.nxv64i8( %va) ret %a } -declare @llvm.ctpop.nxv64i8() define @ctpop_nxv1i16( %va) { ; CHECK-LABEL: ctpop_nxv1i16: @@ -233,7 +226,6 @@ define @ctpop_nxv1i16( %va) { %a = call @llvm.ctpop.nxv1i16( %va) ret %a } -declare @llvm.ctpop.nxv1i16() define @ctpop_nxv2i16( %va) { ; CHECK-LABEL: ctpop_nxv2i16: @@ -268,7 +260,6 @@ define @ctpop_nxv2i16( %va) { %a = call @llvm.ctpop.nxv2i16( %va) ret %a } -declare @llvm.ctpop.nxv2i16() define @ctpop_nxv4i16( %va) { ; CHECK-LABEL: ctpop_nxv4i16: @@ -303,7 +294,6 @@ define @ctpop_nxv4i16( %va) { %a = call @llvm.ctpop.nxv4i16( %va) ret %a } -declare @llvm.ctpop.nxv4i16() define @ctpop_nxv8i16( %va) { ; CHECK-LABEL: ctpop_nxv8i16: @@ -338,7 +328,6 @@ define @ctpop_nxv8i16( %va) { %a = call @llvm.ctpop.nxv8i16( %va) ret %a } -declare @llvm.ctpop.nxv8i16() define @ctpop_nxv16i16( %va) { ; CHECK-LABEL: ctpop_nxv16i16: @@ -373,7 +362,6 @@ define @ctpop_nxv16i16( %va) { %a = call @llvm.ctpop.nxv16i16( %va) ret %a } -declare @llvm.ctpop.nxv16i16() define @ctpop_nxv32i16( %va) { ; CHECK-LABEL: ctpop_nxv32i16: @@ -408,7 +396,6 @@ define @ctpop_nxv32i16( %va) { %a = call @llvm.ctpop.nxv32i16( %va) ret %a } -declare @llvm.ctpop.nxv32i16() define @ctpop_nxv1i32( %va) { ; CHECK-LABEL: ctpop_nxv1i32: @@ -444,7 +431,6 @@ define @ctpop_nxv1i32( %va) { %a = call @llvm.ctpop.nxv1i32( %va) ret %a } -declare @llvm.ctpop.nxv1i32() define @ctpop_nxv2i32( %va) { ; CHECK-LABEL: ctpop_nxv2i32: @@ -480,7 +466,6 @@ define @ctpop_nxv2i32( %va) { %a = call @llvm.ctpop.nxv2i32( %va) ret %a } 
-declare @llvm.ctpop.nxv2i32() define @ctpop_nxv4i32( %va) { ; CHECK-LABEL: ctpop_nxv4i32: @@ -516,7 +501,6 @@ define @ctpop_nxv4i32( %va) { %a = call @llvm.ctpop.nxv4i32( %va) ret %a } -declare @llvm.ctpop.nxv4i32() define @ctpop_nxv8i32( %va) { ; CHECK-LABEL: ctpop_nxv8i32: @@ -552,7 +536,6 @@ define @ctpop_nxv8i32( %va) { %a = call @llvm.ctpop.nxv8i32( %va) ret %a } -declare @llvm.ctpop.nxv8i32() define @ctpop_nxv16i32( %va) { ; CHECK-LABEL: ctpop_nxv16i32: @@ -670,8 +653,6 @@ define @ctpop_nxv16i32_ne_one( %va) { ret %cmp } -declare @llvm.ctpop.nxv16i32() - define @ctpop_nxv1i64( %va) { ; RV32-LABEL: ctpop_nxv1i64: ; RV32: # %bb.0: @@ -753,7 +734,6 @@ define @ctpop_nxv1i64( %va) { %a = call @llvm.ctpop.nxv1i64( %va) ret %a } -declare @llvm.ctpop.nxv1i64() define @ctpop_nxv2i64( %va) { ; RV32-LABEL: ctpop_nxv2i64: @@ -836,7 +816,6 @@ define @ctpop_nxv2i64( %va) { %a = call @llvm.ctpop.nxv2i64( %va) ret %a } -declare @llvm.ctpop.nxv2i64() define @ctpop_nxv4i64( %va) { ; RV32-LABEL: ctpop_nxv4i64: @@ -919,7 +898,6 @@ define @ctpop_nxv4i64( %va) { %a = call @llvm.ctpop.nxv4i64( %va) ret %a } -declare @llvm.ctpop.nxv4i64() define @ctpop_nxv8i64( %va) { ; RV32-LABEL: ctpop_nxv8i64: @@ -1084,4 +1062,3 @@ define @ctpop_nxv8i64_ne_one( %va) { ret %cmp } -declare @llvm.ctpop.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll index ee18a426c1b12..1bbefc65d3e39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.ctpop.nxv1i8(, , i32) - define @vp_ctpop_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +62,6 @@ define @vp_ctpop_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv2i8(, , i32) - define @vp_ctpop_nxv2i8( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_ctpop_nxv2i8: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_ctpop_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv4i8(, , i32) - define @vp_ctpop_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i8: ; CHECK: # %bb.0: @@ -176,8 +170,6 @@ define @vp_ctpop_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv8i8(, , i32) - define @vp_ctpop_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv8i8: ; CHECK: # %bb.0: @@ -232,8 +224,6 @@ define @vp_ctpop_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv16i8(, , i32) - define @vp_ctpop_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i8: ; CHECK: # %bb.0: @@ -288,8 +278,6 @@ define @vp_ctpop_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv32i8(, , i32) - define @vp_ctpop_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv32i8: ; CHECK: # %bb.0: @@ -344,8 +332,6 @@ define @vp_ctpop_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv64i8(, , i32) - define @vp_ctpop_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv64i8: ; CHECK: # %bb.0: @@ -400,8 +386,6 @@ define @vp_ctpop_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv1i16(, , i32) - define @vp_ctpop_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i16: ; CHECK: # %bb.0: @@ -470,8 +454,6 @@ define @vp_ctpop_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i16(, , i32) - define @vp_ctpop_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv2i16: ; CHECK: # %bb.0: @@ -540,8 +522,6 @@ define @vp_ctpop_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i16(, , i32) - define @vp_ctpop_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i16: ; CHECK: # %bb.0: @@ -610,8 +590,6 @@ define @vp_ctpop_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i16(, , i32) - define @vp_ctpop_nxv8i16( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_ctpop_nxv8i16: ; CHECK: # %bb.0: @@ -680,8 +658,6 @@ define @vp_ctpop_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i16(, , i32) - define @vp_ctpop_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i16: ; CHECK: # %bb.0: @@ -750,8 +726,6 @@ define @vp_ctpop_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv32i16(, , i32) - define @vp_ctpop_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv32i16: ; CHECK: # %bb.0: @@ -820,8 +794,6 @@ define @vp_ctpop_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv1i32(, , i32) - define @vp_ctpop_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i32: ; CHECK: # %bb.0: @@ -892,8 +864,6 @@ define @vp_ctpop_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i32(, , i32) - define @vp_ctpop_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv2i32: ; CHECK: # %bb.0: @@ -964,8 +934,6 @@ define @vp_ctpop_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i32(, , i32) - define @vp_ctpop_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i32: ; CHECK: # %bb.0: @@ -1036,8 +1004,6 @@ define @vp_ctpop_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i32(, , i32) - define @vp_ctpop_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv8i32: ; CHECK: # %bb.0: @@ -1108,8 +1074,6 @@ define @vp_ctpop_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i32(, , i32) - define @vp_ctpop_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i32: ; CHECK: # %bb.0: @@ -1180,8 +1144,6 @@ define @vp_ctpop_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv1i64(, , i32) - define @vp_ctpop_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv1i64: ; RV32: # %bb.0: @@ -1346,8 +1308,6 @@ define @vp_ctpop_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i64(, , i32) - define @vp_ctpop_nxv2i64( %va, %m, i32 
zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv2i64: ; RV32: # %bb.0: @@ -1512,8 +1472,6 @@ define @vp_ctpop_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i64(, , i32) - define @vp_ctpop_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv4i64: ; RV32: # %bb.0: @@ -1678,8 +1636,6 @@ define @vp_ctpop_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv7i64(, , i32) - define @vp_ctpop_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv7i64: ; RV32: # %bb.0: @@ -1844,8 +1800,6 @@ define @vp_ctpop_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i64(, , i32) - define @vp_ctpop_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv8i64: ; RV32: # %bb.0: @@ -2010,8 +1964,6 @@ define @vp_ctpop_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i64(, , i32) - define @vp_ctpop_nxv16i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv16i64: ; RV32: # %bb.0: @@ -2430,7 +2382,6 @@ define @vp_ctpop_nxv16i64_unmasked( %va, } ; Test promotion. 
-declare @llvm.vp.ctpop.nxv1i9(, , i32) define @vp_ctpop_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i9: diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll index faa3c48c49e50..79af06db4171e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -71,7 +71,6 @@ define @cttz_nxv1i8( %va) { %a = call @llvm.cttz.nxv1i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i8(, i1) define @cttz_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i8: @@ -136,7 +135,6 @@ define @cttz_nxv2i8( %va) { %a = call @llvm.cttz.nxv2i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i8(, i1) define @cttz_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i8: @@ -201,7 +199,6 @@ define @cttz_nxv4i8( %va) { %a = call @llvm.cttz.nxv4i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i8(, i1) define @cttz_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i8: @@ -266,7 +263,6 @@ define @cttz_nxv8i8( %va) { %a = call @llvm.cttz.nxv8i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i8(, i1) define @cttz_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i8: @@ -331,7 +327,6 @@ define @cttz_nxv16i8( %va) { %a = call @llvm.cttz.nxv16i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i8(, i1) define @cttz_nxv32i8( %va) { ; CHECK-LABEL: cttz_nxv32i8: @@ -362,7 +357,6 @@ define @cttz_nxv32i8( %va) { %a = call @llvm.cttz.nxv32i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv32i8(, i1) define @cttz_nxv64i8( %va) { ; CHECK-LABEL: cttz_nxv64i8: @@ -393,7 +387,6 @@ define @cttz_nxv64i8( %va) { %a = call @llvm.cttz.nxv64i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv64i8(, i1) define @cttz_nxv1i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv1i16: @@ -459,7 +452,6 @@ define @cttz_nxv1i16( %va) { %a = call @llvm.cttz.nxv1i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i16(, i1) define @cttz_nxv2i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i16: @@ -525,7 +517,6 @@ define @cttz_nxv2i16( %va) { 
%a = call @llvm.cttz.nxv2i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i16(, i1) define @cttz_nxv4i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i16: @@ -591,7 +582,6 @@ define @cttz_nxv4i16( %va) { %a = call @llvm.cttz.nxv4i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i16(, i1) define @cttz_nxv8i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i16: @@ -657,7 +647,6 @@ define @cttz_nxv8i16( %va) { %a = call @llvm.cttz.nxv8i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i16(, i1) define @cttz_nxv16i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i16: @@ -723,7 +712,6 @@ define @cttz_nxv16i16( %va) { %a = call @llvm.cttz.nxv16i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i16(, i1) define @cttz_nxv32i16( %va) { ; CHECK-LABEL: cttz_nxv32i16: @@ -761,7 +749,6 @@ define @cttz_nxv32i16( %va) { %a = call @llvm.cttz.nxv32i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv32i16(, i1) define @cttz_nxv1i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv1i32: @@ -831,7 +818,6 @@ define @cttz_nxv1i32( %va) { %a = call @llvm.cttz.nxv1i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i32(, i1) define @cttz_nxv2i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i32: @@ -901,7 +887,6 @@ define @cttz_nxv2i32( %va) { %a = call @llvm.cttz.nxv2i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i32(, i1) define @cttz_nxv4i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i32: @@ -971,7 +956,6 @@ define @cttz_nxv4i32( %va) { %a = call @llvm.cttz.nxv4i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i32(, i1) define @cttz_nxv8i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i32: @@ -1041,7 +1025,6 @@ define @cttz_nxv8i32( %va) { %a = call @llvm.cttz.nxv8i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i32(, i1) define @cttz_nxv16i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i32: @@ -1112,7 +1095,6 @@ define @cttz_nxv16i32( %va) { %a = call @llvm.cttz.nxv16i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i32(, i1) define @cttz_nxv1i64( %va) { ; RV32I-LABEL: cttz_nxv1i64: @@ -1236,7 +1218,6 @@ 
define @cttz_nxv1i64( %va) { %a = call @llvm.cttz.nxv1i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i64(, i1) define @cttz_nxv2i64( %va) { ; RV32I-LABEL: cttz_nxv2i64: @@ -1360,7 +1341,6 @@ define @cttz_nxv2i64( %va) { %a = call @llvm.cttz.nxv2i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i64(, i1) define @cttz_nxv4i64( %va) { ; RV32I-LABEL: cttz_nxv4i64: @@ -1484,7 +1464,6 @@ define @cttz_nxv4i64( %va) { %a = call @llvm.cttz.nxv4i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i64(, i1) define @cttz_nxv8i64( %va) { ; RV32I-LABEL: cttz_nxv8i64: @@ -1608,7 +1587,6 @@ define @cttz_nxv8i64( %va) { %a = call @llvm.cttz.nxv8i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i64(, i1) define @cttz_zero_undef_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv1i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll index 52eaa51051631..c82ad17545a6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.cttz.nxv1i8(, i1 immarg, , i32) - define @vp_cttz_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i8: ; CHECK: # %bb.0: @@ -70,8 +68,6 @@ define @vp_cttz_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv2i8(, i1 immarg, , i32) - define @vp_cttz_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i8: ; CHECK: # %bb.0: @@ -132,8 +128,6 @@ define @vp_cttz_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv4i8(, i1 immarg, , i32) - define @vp_cttz_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i8: ; CHECK: # %bb.0: @@ -194,8 +188,6 @@ define @vp_cttz_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv8i8(, i1 immarg, , i32) - define @vp_cttz_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i8: 
; CHECK: # %bb.0: @@ -256,8 +248,6 @@ define @vp_cttz_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv16i8(, i1 immarg, , i32) - define @vp_cttz_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i8: ; CHECK: # %bb.0: @@ -318,8 +308,6 @@ define @vp_cttz_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv32i8(, i1 immarg, , i32) - define @vp_cttz_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv32i8: ; CHECK: # %bb.0: @@ -380,8 +368,6 @@ define @vp_cttz_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv64i8(, i1 immarg, , i32) - define @vp_cttz_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv64i8: ; CHECK: # %bb.0: @@ -442,8 +428,6 @@ define @vp_cttz_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv1i16(, i1 immarg, , i32) - define @vp_cttz_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i16: ; CHECK: # %bb.0: @@ -518,8 +502,6 @@ define @vp_cttz_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i16(, i1 immarg, , i32) - define @vp_cttz_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i16: ; CHECK: # %bb.0: @@ -594,8 +576,6 @@ define @vp_cttz_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i16(, i1 immarg, , i32) - define @vp_cttz_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i16: ; CHECK: # %bb.0: @@ -670,8 +650,6 @@ define @vp_cttz_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i16(, i1 immarg, , i32) - define @vp_cttz_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i16: ; CHECK: # %bb.0: @@ -746,8 +724,6 @@ define @vp_cttz_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i16(, i1 immarg, , i32) - define @vp_cttz_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i16: ; CHECK: # %bb.0: @@ -822,8 +798,6 @@ define @vp_cttz_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv32i16(, i1 immarg, , i32) - define 
@vp_cttz_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv32i16: ; CHECK: # %bb.0: @@ -898,8 +872,6 @@ define @vp_cttz_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv1i32(, i1 immarg, , i32) - define @vp_cttz_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i32: ; CHECK: # %bb.0: @@ -976,8 +948,6 @@ define @vp_cttz_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i32(, i1 immarg, , i32) - define @vp_cttz_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i32: ; CHECK: # %bb.0: @@ -1054,8 +1024,6 @@ define @vp_cttz_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i32(, i1 immarg, , i32) - define @vp_cttz_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i32: ; CHECK: # %bb.0: @@ -1132,8 +1100,6 @@ define @vp_cttz_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i32(, i1 immarg, , i32) - define @vp_cttz_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i32: ; CHECK: # %bb.0: @@ -1210,8 +1176,6 @@ define @vp_cttz_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i32(, i1 immarg, , i32) - define @vp_cttz_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i32: ; CHECK: # %bb.0: @@ -1288,8 +1252,6 @@ define @vp_cttz_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv1i64(, i1 immarg, , i32) - define @vp_cttz_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv1i64: ; RV32: # %bb.0: @@ -1466,8 +1428,6 @@ define @vp_cttz_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i64(, i1 immarg, , i32) - define @vp_cttz_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv2i64: ; RV32: # %bb.0: @@ -1644,8 +1604,6 @@ define @vp_cttz_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i64(, i1 immarg, , i32) - define @vp_cttz_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv4i64: ; RV32: # %bb.0: @@ -1822,8 +1780,6 @@ define @vp_cttz_nxv4i64_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.cttz.nxv7i64(, i1 immarg, , i32) - define @vp_cttz_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv7i64: ; RV32: # %bb.0: @@ -2000,8 +1956,6 @@ define @vp_cttz_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i64(, i1 immarg, , i32) - define @vp_cttz_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv8i64: ; RV32: # %bb.0: @@ -2178,8 +2132,6 @@ define @vp_cttz_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i64(, i1 immarg, , i32) - define @vp_cttz_nxv16i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv16i64: ; RV32: # %bb.0: @@ -2664,7 +2616,6 @@ define @vp_cttz_zero_undef_nxv1i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i8: ; CHECK: # %bb.0: @@ -2718,7 +2669,6 @@ define @vp_cttz_zero_undef_nxv2i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i8: ; CHECK: # %bb.0: @@ -2772,7 +2722,6 @@ define @vp_cttz_zero_undef_nxv4i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i8: ; CHECK: # %bb.0: @@ -2826,7 +2775,6 @@ define @vp_cttz_zero_undef_nxv8i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv16i8: ; CHECK: # %bb.0: @@ -2880,7 +2828,6 @@ define @vp_cttz_zero_undef_nxv16i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv32i8: ; CHECK: # %bb.0: @@ -2941,7 +2888,6 @@ define @vp_cttz_zero_undef_nxv32i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv64i8: ; CHECK: # %bb.0: @@ -3002,7 +2948,6 @@ define @vp_cttz_zero_undef_nxv64i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_cttz_zero_undef_nxv1i16: ; CHECK: # %bb.0: @@ -3048,7 +2993,6 @@ define @vp_cttz_zero_undef_nxv1i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i16: ; CHECK: # %bb.0: @@ -3094,7 +3038,6 @@ define @vp_cttz_zero_undef_nxv2i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i16: ; CHECK: # %bb.0: @@ -3140,7 +3083,6 @@ define @vp_cttz_zero_undef_nxv4i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i16: ; CHECK: # %bb.0: @@ -3186,7 +3128,6 @@ define @vp_cttz_zero_undef_nxv8i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv16i16: ; CHECK: # %bb.0: @@ -3232,7 +3173,6 @@ define @vp_cttz_zero_undef_nxv16i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv32i16: ; CHECK: # %bb.0: @@ -3307,7 +3247,6 @@ define @vp_cttz_zero_undef_nxv32i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv1i32: ; CHECK: # %bb.0: @@ -3355,7 +3294,6 @@ define @vp_cttz_zero_undef_nxv1i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i32: ; CHECK: # %bb.0: @@ -3403,7 +3341,6 @@ define @vp_cttz_zero_undef_nxv2i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i32: ; CHECK: # %bb.0: @@ -3451,7 +3388,6 @@ define @vp_cttz_zero_undef_nxv4i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i32: ; CHECK: # %bb.0: @@ -3499,7 +3435,6 @@ define @vp_cttz_zero_undef_nxv8i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_cttz_zero_undef_nxv16i32: ; CHECK: # %bb.0: @@ -3546,7 +3481,6 @@ define @vp_cttz_zero_undef_nxv16i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv1i64: ; CHECK: # %bb.0: @@ -3595,7 +3529,6 @@ define @vp_cttz_zero_undef_nxv1i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i64: ; CHECK: # %bb.0: @@ -3644,7 +3577,6 @@ define @vp_cttz_zero_undef_nxv2i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i64: ; CHECK: # %bb.0: @@ -3693,7 +3625,6 @@ define @vp_cttz_zero_undef_nxv4i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv7i64: ; CHECK: # %bb.0: @@ -3742,7 +3673,6 @@ define @vp_cttz_zero_undef_nxv7i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i64: ; CHECK: # %bb.0: @@ -3906,7 +3836,6 @@ define @vp_cttz_zero_undef_nxv16i64_unmasked( @llvm.vp.cttz.nxv1i9(, i1 immarg, , i32) define @vp_cttz_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir index 5221fa73525cc..1e8aea92c9780 100644 --- a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir +++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir @@ -57,9 +57,6 @@ unreachable, !dbg !8 } - ; Function Attrs: nounwind readnone speculatable willreturn - declare void @llvm.dbg.value(metadata, metadata, metadata) - !llvm.dbg.cu = !{!0} !llvm.debugify = !{!3, !3} !llvm.module.flags = !{!4} @@ -92,7 +89,6 @@ !25 = !{!26} !26 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_bregx, 7202, 0, DW_OP_constu, 1, DW_OP_div, DW_OP_constu, 1, DW_OP_mul)) - ... 
--- name: foo diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll index 31fa5d025156f..9baa2f71abb8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll @@ -81,8 +81,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare i64 @llvm.vscale.i64() - define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) { ; NO-SINK-LABEL: sink_splat_add_scalable: ; NO-SINK: # %bb.0: # %entry @@ -261,8 +259,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; NO-SINK-LABEL: sink_splat_vp_add: ; NO-SINK: # %bb.0: # %entry @@ -578,8 +574,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; NO-SINK-LABEL: sink_splat_vp_fadd: ; NO-SINK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll index 51dc7b0714d7f..fb4391a5e30c1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f64() - define @trunc_nxv1f64_to_si8( %x) { ; RV32-LABEL: trunc_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -254,8 +252,6 @@ define @trunc_nxv1f64_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f64() - define @trunc_nxv4f64_to_si8( %x) { ; 
RV32-LABEL: trunc_nxv4f64_to_si8: ; RV32: # %bb.0: @@ -500,8 +496,6 @@ define @trunc_nxv4f64_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f64() - define @ceil_nxv1f64_to_si8( %x) { ; RV32-LABEL: ceil_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -778,8 +772,6 @@ define @ceil_nxv1f64_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f64() - define @ceil_nxv4f64_to_si8( %x) { ; RV32-LABEL: ceil_nxv4f64_to_si8: ; RV32: # %bb.0: @@ -1056,8 +1048,6 @@ define @ceil_nxv4f64_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv1f64() - define @rint_nxv1f64_to_si8( %x) { ; RV32-LABEL: rint_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -1302,8 +1292,6 @@ define @rint_nxv1f64_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f64() - define @rint_nxv4f64_to_si8( %x) { ; RV32-LABEL: rint_nxv4f64_to_si8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll index 81b2b6594890e..fab8363dc7cde 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll @@ -3,8 +3,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 ; Should expand to scalar instructions and do not crash -declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vpreduce_add_v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) { ; RV32-LABEL: vpreduce_add_v4i32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll index 7c9a283dd54bc..2245ce8f5fa12 100644 --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -548,27 
+548,3 @@ define @extract_nxv6bf16_nxv12bf16_6( %res } -declare @llvm.vector.extract.nxv6f16.nxv12f16(, i64) - -declare @llvm.vector.extract.nxv1i8.nxv4i8( %vec, i64 %idx) -declare @llvm.vector.extract.nxv1i8.nxv8i8( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv1i32.nxv2i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv4i1( %vec, i64 %idx) -declare @llvm.vector.extract.nxv16i1( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i1( %vec, i64 %idx) -declare @llvm.vector.extract.nxv8i1( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll index 903c0dcaba2d8..241f619b1133f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll @@ -1472,8 +1472,6 @@ define void @store_vfmv_f_s_nxv8f64(ptr %x, ptr %p) { ret void } -declare double @llvm.riscv.vfmv.f.s.nxv8f64() - define float @extractelt_fadd_nxv4f32_splat( %x) { ; CHECK-LABEL: extractelt_fadd_nxv4f32_splat: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll index 316a84f98be2b..1f4eaea90628b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @ceil_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv1f16(, metadata) define @ceil_nxv2f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv2f16: @@ -50,7 +49,6 @@ define @ceil_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv2f16(, metadata) define @ceil_nxv4f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv4f16: @@ -74,7 +72,6 @@ define @ceil_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f16(, metadata) define @ceil_nxv8f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv8f16: @@ -98,7 +95,6 @@ define @ceil_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv8f16(, metadata) define @ceil_nxv16f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv16f16: @@ -122,7 +118,6 @@ define @ceil_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv16f16(, metadata) define @ceil_nxv32f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv32f16: @@ -146,7 +141,6 @@ define @ceil_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv32f16(, metadata) define @ceil_nxv1f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv1f32: @@ -169,7 +163,6 @@ define @ceil_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv1f32(, metadata) define @ceil_nxv2f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv2f32: @@ -192,7 +185,6 @@ define @ceil_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv2f32(, metadata) define @ceil_nxv4f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv4f32: @@ -215,7 +207,6 @@ define @ceil_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f32(, metadata) define @ceil_nxv8f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv8f32: @@ -238,7 +229,6 @@ define @ceil_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv8f32(, metadata) define @ceil_nxv16f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv16f32: @@ -261,7 +251,6 @@ define @ceil_nxv16f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv16f32(, metadata) define @ceil_nxv1f64( %x) strictfp { ; RV32-LABEL: ceil_nxv1f64: @@ -303,7 +292,6 @@ define @ceil_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv1f64(, metadata) define @ceil_nxv2f64( %x) strictfp { ; RV32-LABEL: ceil_nxv2f64: @@ -345,7 +333,6 @@ define @ceil_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv2f64(, metadata) define @ceil_nxv4f64( %x) strictfp { ; RV32-LABEL: ceil_nxv4f64: @@ -387,7 +374,6 @@ define @ceil_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f64(, metadata) define @ceil_nxv8f64( %x) strictfp { ; RV32-LABEL: ceil_nxv8f64: @@ -429,4 +415,3 @@ define @ceil_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll index 56edec1cc7a68..504930f07bb13 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll @@ -202,7 +202,6 @@ define @ceil_nxv1f16( %x) { %a = call @llvm.ceil.nxv1f16( %x) ret %a } -declare @llvm.ceil.nxv1f16() define @ceil_nxv2f16( %x) { ; ZVFH-LABEL: ceil_nxv2f16: @@ -242,7 +241,6 @@ define @ceil_nxv2f16( %x) { %a = call @llvm.ceil.nxv2f16( %x) ret %a } -declare @llvm.ceil.nxv2f16() define @ceil_nxv4f16( %x) { ; ZVFH-LABEL: ceil_nxv4f16: @@ -282,7 +280,6 @@ define @ceil_nxv4f16( %x) { %a = call @llvm.ceil.nxv4f16( %x) ret %a } -declare @llvm.ceil.nxv4f16() define @ceil_nxv8f16( %x) { ; ZVFH-LABEL: ceil_nxv8f16: @@ -322,7 +319,6 @@ define @ceil_nxv8f16( %x) { %a = call @llvm.ceil.nxv8f16( %x) ret %a } -declare @llvm.ceil.nxv8f16() define @ceil_nxv16f16( %x) { ; ZVFH-LABEL: ceil_nxv16f16: @@ -362,7 +358,6 @@ define @ceil_nxv16f16( %x) { %a = call @llvm.ceil.nxv16f16( %x) ret %a } -declare @llvm.ceil.nxv16f16() define @ceil_nxv32f16( %x) { ; ZVFH-LABEL: ceil_nxv32f16: @@ -416,7 +411,6 @@ define @ceil_nxv32f16( %x) { %a = call @llvm.ceil.nxv32f16( %x) ret %a } -declare @llvm.ceil.nxv32f16() define @ceil_nxv1f32( %x) { ; CHECK-LABEL: ceil_nxv1f32: @@ -436,7 +430,6 @@ define @ceil_nxv1f32( %x) { %a = call @llvm.ceil.nxv1f32( %x) ret %a } -declare @llvm.ceil.nxv1f32() define @ceil_nxv2f32( %x) { ; CHECK-LABEL: ceil_nxv2f32: @@ -456,7 +449,6 @@ define @ceil_nxv2f32( %x) { %a = call @llvm.ceil.nxv2f32( %x) ret %a } -declare @llvm.ceil.nxv2f32() define @ceil_nxv4f32( %x) { ; CHECK-LABEL: ceil_nxv4f32: @@ -476,7 +468,6 @@ define @ceil_nxv4f32( %x) { %a = call @llvm.ceil.nxv4f32( %x) ret %a } -declare @llvm.ceil.nxv4f32() define @ceil_nxv8f32( %x) { ; CHECK-LABEL: ceil_nxv8f32: @@ -496,7 +487,6 @@ define @ceil_nxv8f32( %x) { %a = call @llvm.ceil.nxv8f32( %x) ret %a } 
-declare @llvm.ceil.nxv8f32() define @ceil_nxv16f32( %x) { ; CHECK-LABEL: ceil_nxv16f32: @@ -516,7 +506,6 @@ define @ceil_nxv16f32( %x) { %a = call @llvm.ceil.nxv16f32( %x) ret %a } -declare @llvm.ceil.nxv16f32() define @ceil_nxv1f64( %x) { ; RV32ZFH-LABEL: ceil_nxv1f64: @@ -583,7 +572,6 @@ define @ceil_nxv1f64( %x) { %a = call @llvm.ceil.nxv1f64( %x) ret %a } -declare @llvm.ceil.nxv1f64() define @ceil_nxv2f64( %x) { ; RV32ZFH-LABEL: ceil_nxv2f64: @@ -650,7 +638,6 @@ define @ceil_nxv2f64( %x) { %a = call @llvm.ceil.nxv2f64( %x) ret %a } -declare @llvm.ceil.nxv2f64() define @ceil_nxv4f64( %x) { ; RV32ZFH-LABEL: ceil_nxv4f64: @@ -717,7 +704,6 @@ define @ceil_nxv4f64( %x) { %a = call @llvm.ceil.nxv4f64( %x) ret %a } -declare @llvm.ceil.nxv4f64() define @ceil_nxv8f64( %x) { ; RV32ZFH-LABEL: ceil_nxv8f64: @@ -784,4 +770,3 @@ define @ceil_nxv8f64( %x) { %a = call @llvm.ceil.nxv8f64( %x) ret %a } -declare @llvm.ceil.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll index 7045fc7c50847..3a7de21c14390 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @floor_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f16(, metadata) define @floor_nxv2f16( %x) strictfp { ; CHECK-LABEL: floor_nxv2f16: @@ -50,7 +49,6 @@ define @floor_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f16(, metadata) define @floor_nxv4f16( %x) strictfp { ; CHECK-LABEL: floor_nxv4f16: @@ -74,7 +72,6 @@ define @floor_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.floor.nxv4f16(, metadata) define @floor_nxv8f16( %x) strictfp { ; CHECK-LABEL: floor_nxv8f16: @@ -98,7 +95,6 @@ define @floor_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f16(, metadata) define @floor_nxv16f16( %x) strictfp { ; CHECK-LABEL: floor_nxv16f16: @@ -122,7 +118,6 @@ define @floor_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv16f16(, metadata) define @floor_nxv32f16( %x) strictfp { ; CHECK-LABEL: floor_nxv32f16: @@ -146,7 +141,6 @@ define @floor_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv32f16(, metadata) define @floor_nxv1f32( %x) strictfp { ; CHECK-LABEL: floor_nxv1f32: @@ -169,7 +163,6 @@ define @floor_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f32(, metadata) define @floor_nxv2f32( %x) strictfp { ; CHECK-LABEL: floor_nxv2f32: @@ -192,7 +185,6 @@ define @floor_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f32(, metadata) define @floor_nxv4f32( %x) strictfp { ; CHECK-LABEL: floor_nxv4f32: @@ -215,7 +207,6 @@ define @floor_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv4f32(, metadata) define @floor_nxv8f32( %x) strictfp { ; CHECK-LABEL: floor_nxv8f32: @@ -238,7 +229,6 @@ define @floor_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f32( %x, metadata 
!"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f32(, metadata) define @floor_nxv16f32( %x) strictfp { ; CHECK-LABEL: floor_nxv16f32: @@ -261,7 +251,6 @@ define @floor_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.floor.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv16f32(, metadata) define @floor_nxv1f64( %x) strictfp { ; RV32-LABEL: floor_nxv1f64: @@ -303,7 +292,6 @@ define @floor_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f64(, metadata) define @floor_nxv2f64( %x) strictfp { ; RV32-LABEL: floor_nxv2f64: @@ -345,7 +333,6 @@ define @floor_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f64(, metadata) define @floor_nxv4f64( %x) strictfp { ; RV32-LABEL: floor_nxv4f64: @@ -387,7 +374,6 @@ define @floor_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv4f64(, metadata) define @floor_nxv8f64( %x) strictfp { ; RV32-LABEL: floor_nxv8f64: @@ -429,4 +415,3 @@ define @floor_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll index 9adbca55bcd01..326ac8c8c607d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll @@ -34,7 +34,6 @@ define @floor_nxv1bf16( %x) { %a = call @llvm.floor.nxv1bf16( %x) ret %a } -declare @llvm.floor.nxv1bf16() define @floor_nxv2bf16( %x) { ; CHECK-LABEL: floor_nxv2bf16: @@ -58,7 +57,6 @@ 
define @floor_nxv2bf16( %x) { %a = call @llvm.floor.nxv2bf16( %x) ret %a } -declare @llvm.floor.nxv2bf16() define @floor_nxv4bf16( %x) { ; CHECK-LABEL: floor_nxv4bf16: @@ -82,7 +80,6 @@ define @floor_nxv4bf16( %x) { %a = call @llvm.floor.nxv4bf16( %x) ret %a } -declare @llvm.floor.nxv4bf16() define @floor_nxv8bf16( %x) { ; CHECK-LABEL: floor_nxv8bf16: @@ -106,7 +103,6 @@ define @floor_nxv8bf16( %x) { %a = call @llvm.floor.nxv8bf16( %x) ret %a } -declare @llvm.floor.nxv8bf16() define @floor_nxv16bf16( %x) { ; CHECK-LABEL: floor_nxv16bf16: @@ -130,7 +126,6 @@ define @floor_nxv16bf16( %x) { %a = call @llvm.floor.nxv16bf16( %x) ret %a } -declare @llvm.floor.nxv16bf16() define @floor_nxv32bf16( %x) { ; CHECK-LABEL: floor_nxv32bf16: @@ -168,7 +163,6 @@ define @floor_nxv32bf16( %x) { %a = call @llvm.floor.nxv32bf16( %x) ret %a } -declare @llvm.floor.nxv32bf16() define @floor_nxv1f16( %x) { ; ZVFH-LABEL: floor_nxv1f16: @@ -208,7 +202,6 @@ define @floor_nxv1f16( %x) { %a = call @llvm.floor.nxv1f16( %x) ret %a } -declare @llvm.floor.nxv1f16() define @floor_nxv2f16( %x) { ; ZVFH-LABEL: floor_nxv2f16: @@ -248,7 +241,6 @@ define @floor_nxv2f16( %x) { %a = call @llvm.floor.nxv2f16( %x) ret %a } -declare @llvm.floor.nxv2f16() define @floor_nxv4f16( %x) { ; ZVFH-LABEL: floor_nxv4f16: @@ -288,7 +280,6 @@ define @floor_nxv4f16( %x) { %a = call @llvm.floor.nxv4f16( %x) ret %a } -declare @llvm.floor.nxv4f16() define @floor_nxv8f16( %x) { ; ZVFH-LABEL: floor_nxv8f16: @@ -328,7 +319,6 @@ define @floor_nxv8f16( %x) { %a = call @llvm.floor.nxv8f16( %x) ret %a } -declare @llvm.floor.nxv8f16() define @floor_nxv16f16( %x) { ; ZVFH-LABEL: floor_nxv16f16: @@ -368,7 +358,6 @@ define @floor_nxv16f16( %x) { %a = call @llvm.floor.nxv16f16( %x) ret %a } -declare @llvm.floor.nxv16f16() define @floor_nxv32f16( %x) { ; ZVFH-LABEL: floor_nxv32f16: @@ -422,7 +411,6 @@ define @floor_nxv32f16( %x) { %a = call @llvm.floor.nxv32f16( %x) ret %a } -declare @llvm.floor.nxv32f16() define @floor_nxv1f32( %x) { ; 
CHECK-LABEL: floor_nxv1f32: @@ -442,7 +430,6 @@ define @floor_nxv1f32( %x) { %a = call @llvm.floor.nxv1f32( %x) ret %a } -declare @llvm.floor.nxv1f32() define @floor_nxv2f32( %x) { ; CHECK-LABEL: floor_nxv2f32: @@ -462,7 +449,6 @@ define @floor_nxv2f32( %x) { %a = call @llvm.floor.nxv2f32( %x) ret %a } -declare @llvm.floor.nxv2f32() define @floor_nxv4f32( %x) { ; CHECK-LABEL: floor_nxv4f32: @@ -482,7 +468,6 @@ define @floor_nxv4f32( %x) { %a = call @llvm.floor.nxv4f32( %x) ret %a } -declare @llvm.floor.nxv4f32() define @floor_nxv8f32( %x) { ; CHECK-LABEL: floor_nxv8f32: @@ -502,7 +487,6 @@ define @floor_nxv8f32( %x) { %a = call @llvm.floor.nxv8f32( %x) ret %a } -declare @llvm.floor.nxv8f32() define @floor_nxv16f32( %x) { ; CHECK-LABEL: floor_nxv16f32: @@ -522,7 +506,6 @@ define @floor_nxv16f32( %x) { %a = call @llvm.floor.nxv16f32( %x) ret %a } -declare @llvm.floor.nxv16f32() define @floor_nxv1f64( %x) { ; RV32ZFH-LABEL: floor_nxv1f64: @@ -589,7 +572,6 @@ define @floor_nxv1f64( %x) { %a = call @llvm.floor.nxv1f64( %x) ret %a } -declare @llvm.floor.nxv1f64() define @floor_nxv2f64( %x) { ; RV32ZFH-LABEL: floor_nxv2f64: @@ -656,7 +638,6 @@ define @floor_nxv2f64( %x) { %a = call @llvm.floor.nxv2f64( %x) ret %a } -declare @llvm.floor.nxv2f64() define @floor_nxv4f64( %x) { ; RV32ZFH-LABEL: floor_nxv4f64: @@ -723,7 +704,6 @@ define @floor_nxv4f64( %x) { %a = call @llvm.floor.nxv4f64( %x) ret %a } -declare @llvm.floor.nxv4f64() define @floor_nxv8f64( %x) { ; RV32ZFH-LABEL: floor_nxv8f64: @@ -790,4 +770,3 @@ define @floor_nxv8f64( %x) { %a = call @llvm.floor.nxv8f64( %x) ret %a } -declare @llvm.floor.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll index bd1209a17b534..0f26832cffdc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll @@ -689,39 +689,6 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) { ret <16 x i8> 
%sub } -declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1) -declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) - -declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1) -declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) - -declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1) -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) -declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) - -declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) -declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) - -declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1) - -declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) - ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll index 84da351de76ba..fa81e1f6f3514 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i8: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i8: ; CHECK: # %bb.0: @@ -52,8 +48,6 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i8: ; CHECK: # %bb.0: @@ -76,8 +70,6 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i8: ; CHECK: # %bb.0: @@ -100,8 +92,6 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.abs.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i16: ; CHECK: # %bb.0: @@ -124,8 +114,6 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, 
i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.abs.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i16: ; CHECK: # %bb.0: @@ -148,8 +136,6 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.abs.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i16: ; CHECK: # %bb.0: @@ -172,8 +158,6 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.abs.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i16: ; CHECK: # %bb.0: @@ -196,8 +180,6 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.abs.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i32: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i32: ; CHECK: # %bb.0: @@ -244,8 +224,6 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.abs.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i32: ; CHECK: # %bb.0: @@ -268,8 +246,6 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.abs.v16i32(<16 x 
i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i32: ; CHECK: # %bb.0: @@ -292,8 +268,6 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i64: ; CHECK: # %bb.0: @@ -316,8 +290,6 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i64: ; CHECK: # %bb.0: @@ -340,8 +312,6 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i64: ; CHECK: # %bb.0: @@ -364,8 +334,6 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.abs.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v15i64: ; CHECK: # %bb.0: @@ -388,8 +356,6 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i64: ; CHECK: # %bb.0: @@ -412,8 +378,6 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> 
@vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll index 2356237d790b6..847722ae6b8ab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll @@ -16,7 +16,6 @@ define void @abs_v16i8(ptr %x) { store <16 x i8> %b, ptr %x ret void } -declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) define void @abs_v8i16(ptr %x) { ; CHECK-LABEL: abs_v8i16: @@ -32,7 +31,6 @@ define void @abs_v8i16(ptr %x) { store <8 x i16> %b, ptr %x ret void } -declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) define void @abs_v6i16(ptr %x) { ; CHECK-LABEL: abs_v6i16: @@ -48,7 +46,6 @@ define void @abs_v6i16(ptr %x) { store <6 x i16> %b, ptr %x ret void } -declare <6 x i16> @llvm.abs.v6i16(<6 x i16>, i1) define void @abs_v4i32(ptr %x) { ; CHECK-LABEL: abs_v4i32: @@ -64,7 +61,6 @@ define void @abs_v4i32(ptr %x) { store <4 x i32> %b, ptr %x ret void } -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) define void @abs_v2i64(ptr %x) { ; CHECK-LABEL: abs_v2i64: @@ -80,7 +76,6 @@ define void @abs_v2i64(ptr %x) { store <2 x i64> %b, ptr %x ret void } -declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) define void @abs_v32i8(ptr %x) { ; CHECK-LABEL: abs_v32i8: @@ -97,7 +92,6 @@ define void @abs_v32i8(ptr %x) { store <32 x i8> %b, ptr %x ret void } -declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) define void @abs_v16i16(ptr %x) { ; CHECK-LABEL: abs_v16i16: @@ -113,7 +107,6 @@ define void @abs_v16i16(ptr %x) { store <16 x i16> %b, ptr %x ret void } -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) define void @abs_v8i32(ptr %x) { ; CHECK-LABEL: abs_v8i32: @@ -129,7 +122,6 @@ define void @abs_v8i32(ptr %x) { store <8 x i32> %b, ptr %x ret void } -declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) define void @abs_v4i64(ptr %x) { ; CHECK-LABEL: abs_v4i64: @@ -145,7 +137,6 @@ define 
void @abs_v4i64(ptr %x) { store <4 x i64> %b, ptr %x ret void } -declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) define void @abs_v4i64_of_sext_v4i8(ptr %x) { ; CHECK-LABEL: abs_v4i64_of_sext_v4i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll index 3d83065009f28..f436bbb9a66ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i8> @vp_bitreverse_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i8: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define <2 x i8> @vp_bitreverse_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i8> @vp_bitreverse_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i8: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define <4 x i8> @vp_bitreverse_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8>, <8 x i1>, i32) - define <8 x i8> @vp_bitreverse_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i8: ; CHECK: # %bb.0: @@ -166,8 +160,6 @@ define <8 x i8> @vp_bitreverse_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8>, <16 x i1>, i32) - define <16 x i8> @vp_bitreverse_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define <16 x i8> @vp_bitreverse_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> 
@llvm.vp.bitreverse.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_bitreverse_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i16: ; CHECK: # %bb.0: @@ -288,8 +278,6 @@ define <2 x i16> @vp_bitreverse_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_bitreverse_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i16: ; CHECK: # %bb.0: @@ -356,8 +344,6 @@ define <4 x i16> @vp_bitreverse_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_bitreverse_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i16: ; CHECK: # %bb.0: @@ -424,8 +410,6 @@ define <8 x i16> @vp_bitreverse_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_bitreverse_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i16: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define <16 x i16> @vp_bitreverse_v16i16_unmasked(<16 x i16> %va, i32 zeroext %ev ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i32> @vp_bitreverse_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i32: ; CHECK: # %bb.0: @@ -576,8 +558,6 @@ define <2 x i32> @vp_bitreverse_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_bitreverse_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i32: ; CHECK: # %bb.0: @@ -660,8 +640,6 @@ define <4 x i32> @vp_bitreverse_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <8 x 
i32> @llvm.vp.bitreverse.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_bitreverse_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i32: ; CHECK: # %bb.0: @@ -744,8 +722,6 @@ define <8 x i32> @vp_bitreverse_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_bitreverse_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i32: ; CHECK: # %bb.0: @@ -828,8 +804,6 @@ define <16 x i32> @vp_bitreverse_v16i32_unmasked(<16 x i32> %va, i32 zeroext %ev ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v2i64: ; RV32: # %bb.0: @@ -1101,8 +1075,6 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v4i64: ; RV32: # %bb.0: @@ -1374,8 +1346,6 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v8i64: ; RV32: # %bb.0: @@ -1647,8 +1617,6 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v15i64: ; RV32: # %bb.0: @@ -2026,8 +1994,6 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev ret <15 x i64> %v } 
-declare <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v16i64: ; RV32: # %bb.0: @@ -2405,8 +2371,6 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev ret <16 x i64> %v } -declare <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v128i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll index 6d9793c12153e..10b48d2ebe1f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -49,7 +49,6 @@ define void @bitreverse_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) define void @bitreverse_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v4i32: @@ -104,7 +103,6 @@ define void @bitreverse_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) define void @bitreverse_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: bitreverse_v2i64: @@ -254,7 +252,6 @@ define void @bitreverse_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) define void @bitreverse_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v16i16: @@ -301,7 +298,6 @@ define void @bitreverse_v16i16(ptr %x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) define void @bitreverse_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v8i32: @@ -356,7 +352,6 @@ define void @bitreverse_v8i32(ptr %x, ptr %y) { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) define 
void @bitreverse_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: bitreverse_v4i64: @@ -506,4 +501,3 @@ define void @bitreverse_v4i64(ptr %x, ptr %y) { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll index b7ca932bb1c45..eca94ccb9bf7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i16> @llvm.vp.bswap.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_bswap_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v2i16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x i16> @vp_bswap_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.bswap.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_bswap_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v4i16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x i16> @vp_bswap_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.bswap.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_bswap_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v8i16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x i16> @vp_bswap_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.bswap.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_bswap_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v16i16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x i16> @vp_bswap_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.bswap.v2i32(<2 x i32>, <2 x i1>, i32) 
- define <2 x i32> @vp_bswap_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v2i32: ; CHECK: # %bb.0: @@ -150,8 +140,6 @@ define <2 x i32> @vp_bswap_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_bswap_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v4i32: ; CHECK: # %bb.0: @@ -192,8 +180,6 @@ define <4 x i32> @vp_bswap_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.bswap.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_bswap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v8i32: ; CHECK: # %bb.0: @@ -234,8 +220,6 @@ define <8 x i32> @vp_bswap_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.bswap.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_bswap_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v16i32: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i32> @vp_bswap_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.bswap.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_bswap_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v2i64: ; RV32: # %bb.0: @@ -436,8 +418,6 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.bswap.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_bswap_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v4i64: ; RV32: # %bb.0: @@ -596,8 +576,6 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.bswap.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_bswap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vp_bswap_v8i64: ; RV32: # %bb.0: @@ -756,8 +734,6 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v15i64: ; RV32: # %bb.0: @@ -1021,8 +997,6 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v16i64: ; RV32: # %bb.0: @@ -1286,8 +1260,6 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v128i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll index 5b823442c8b04..4479c30151956 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -28,7 +28,6 @@ define void @bswap_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>) define void @bswap_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v4i32: @@ -62,7 +61,6 @@ define void @bswap_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) define void @bswap_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: bswap_v2i64: @@ -155,7 +153,6 @@ define void @bswap_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) define void @bswap_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v16i16: @@ -181,7 +178,6 @@ define void @bswap_v16i16(ptr 
%x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>) define void @bswap_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v8i32: @@ -215,7 +211,6 @@ define void @bswap_v8i32(ptr %x, ptr %y) { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>) define void @bswap_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: bswap_v4i64: @@ -308,4 +303,3 @@ define void @bswap_v4i64(ptr %x, ptr %y) { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll index 4b42c517379ad..466d5d4b8e80a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.ceil.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.ceil.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.ceil.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, 
<16 x i1>, i32) - define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ret <16 x half> %v } -declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.ceil.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) ret <16 x float> %v } -declare <2 x double> @llvm.vp.ceil.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x 
double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev ret <15 x double> %v } -declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev ret <16 x double> %v } -declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll index 9d42f2b6adeed..6c5a0d44a71e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32 ; RUN: llc -mtriple=riscv64 
-mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>) define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f16: ; RV32: # %bb.0: @@ -25,7 +24,6 @@ define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>) define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f16: ; RV32: # %bb.0: @@ -48,7 +46,6 @@ define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>) define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f16: ; RV32: # %bb.0: @@ -71,7 +68,6 @@ define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>) define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f16: ; RV32: # %bb.0: @@ -94,7 +90,6 @@ define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>) define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f32: ; RV32: # %bb.0: @@ -117,7 +112,6 @@ define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>) define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f32: ; RV32: # %bb.0: @@ -140,7 +134,6 @@ define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) { ret void } -declare void 
@llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>) define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f32: ; RV32: # %bb.0: @@ -163,7 +156,6 @@ define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>) define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f32: ; RV32: # %bb.0: @@ -186,7 +178,6 @@ define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>) define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f64: ; RV32: # %bb.0: @@ -209,7 +200,6 @@ define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>) define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f64: ; RV32: # %bb.0: @@ -232,7 +222,6 @@ define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>) define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f64: ; RV32: # %bb.0: @@ -255,7 +244,6 @@ define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>) define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll index a388ba92f302b..002cf3440dd2c 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>) define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i8: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>) define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i8: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>) define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i8: ; CHECK: # %bb.0: @@ -44,7 +41,6 @@ define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>) define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i8: ; CHECK: # %bb.0: @@ -58,7 +54,6 @@ define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>) define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i16: ; CHECK: # %bb.0: @@ -72,7 +67,6 @@ define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>) define void @compressstore_v2i16(ptr 
%base, <2 x i16> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i16: ; CHECK: # %bb.0: @@ -86,7 +80,6 @@ define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>) define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i16: ; CHECK: # %bb.0: @@ -100,7 +93,6 @@ define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>) define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i16: ; CHECK: # %bb.0: @@ -114,7 +106,6 @@ define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>) define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i32: ; CHECK: # %bb.0: @@ -128,7 +119,6 @@ define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>) define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i32: ; CHECK: # %bb.0: @@ -142,7 +132,6 @@ define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>) define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i32: ; CHECK: # %bb.0: @@ -156,7 +145,6 @@ define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>) define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i32: ; CHECK: # %bb.0: @@ -170,7 +158,6 @@ define void 
@compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>) define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i64: ; CHECK: # %bb.0: @@ -184,7 +171,6 @@ define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>) define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i64: ; CHECK: # %bb.0: @@ -198,7 +184,6 @@ define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>) define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i64: ; CHECK: # %bb.0: @@ -212,7 +197,6 @@ define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>) define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll index b1af4e685c58f..00c36cb7f7327 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_ctlz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i8: ; CHECK: # %bb.0: @@ -62,8 +60,6 @@ define <2 x i8> @vp_ctlz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8>, 
i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_ctlz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i8: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define <4 x i8> @vp_ctlz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_ctlz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i8: ; CHECK: # %bb.0: @@ -178,8 +172,6 @@ define <8 x i8> @vp_ctlz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_ctlz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i8: ; CHECK: # %bb.0: @@ -236,8 +228,6 @@ define <16 x i8> @vp_ctlz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_ctlz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i16: ; CHECK: # %bb.0: @@ -312,8 +302,6 @@ define <2 x i16> @vp_ctlz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_ctlz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i16: ; CHECK: # %bb.0: @@ -388,8 +376,6 @@ define <4 x i16> @vp_ctlz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - define <8 x i16> @vp_ctlz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i16: ; CHECK: # %bb.0: @@ -464,8 +450,6 @@ define <8 x i16> @vp_ctlz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_ctlz_v16i16(<16 x i16> %va, <16 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i16: ; CHECK: # %bb.0: @@ -540,8 +524,6 @@ define <16 x i16> @vp_ctlz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_ctlz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i32: ; CHECK: # %bb.0: @@ -622,8 +604,6 @@ define <2 x i32> @vp_ctlz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_ctlz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i32: ; CHECK: # %bb.0: @@ -704,8 +684,6 @@ define <4 x i32> @vp_ctlz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_ctlz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i32: ; CHECK: # %bb.0: @@ -786,8 +764,6 @@ define <8 x i32> @vp_ctlz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_ctlz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i32: ; CHECK: # %bb.0: @@ -868,8 +844,6 @@ define <16 x i32> @vp_ctlz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_ctlz_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v2i64: ; RV32: # %bb.0: @@ -1078,8 +1052,6 @@ define <2 x i64> @vp_ctlz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_ctlz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vp_ctlz_v4i64: ; RV32: # %bb.0: @@ -1288,8 +1260,6 @@ define <4 x i64> @vp_ctlz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_ctlz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v8i64: ; RV32: # %bb.0: @@ -1498,8 +1468,6 @@ define <8 x i64> @vp_ctlz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v15i64: ; RV32: # %bb.0: @@ -1708,8 +1676,6 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v16i64: ; RV32: # %bb.0: @@ -1918,8 +1884,6 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll index 61730b87c5517..02e1ec8da49fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -80,7 +80,6 @@ define void @ctlz_v16i8(ptr %x, ptr %y) nounwind { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) define void @ctlz_v8i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v8i16: @@ -157,7 +156,6 @@ define void @ctlz_v8i16(ptr %x, ptr %y) nounwind { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> 
@llvm.ctlz.v8i16(<8 x i16>, i1) define void @ctlz_v4i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v4i32: @@ -240,7 +238,6 @@ define void @ctlz_v4i32(ptr %x, ptr %y) nounwind { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) define void @ctlz_v2i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: ctlz_v2i64: @@ -393,7 +390,6 @@ define void @ctlz_v2i64(ptr %x, ptr %y) nounwind { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) define void @ctlz_v32i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v32i8: @@ -471,7 +467,6 @@ define void @ctlz_v32i8(ptr %x, ptr %y) nounwind { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1) define void @ctlz_v16i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v16i16: @@ -548,7 +543,6 @@ define void @ctlz_v16i16(ptr %x, ptr %y) nounwind { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1) define void @ctlz_v8i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v8i32: @@ -631,7 +625,6 @@ define void @ctlz_v8i32(ptr %x, ptr %y) nounwind { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) define void @ctlz_v4i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: ctlz_v4i64: @@ -784,7 +777,6 @@ define void @ctlz_v4i64(ptr %x, ptr %y) nounwind { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) define void @ctlz_zero_undef_v16i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_zero_undef_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll index a993ed909d940..f56438bf87e6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 
-declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i8> @vp_ctpop_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i8: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i8> @vp_ctpop_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i8: ; CHECK: # %bb.0: @@ -92,8 +88,6 @@ define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32) - define <8 x i8> @vp_ctpop_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v8i8: ; CHECK: # %bb.0: @@ -136,8 +130,6 @@ define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32) - define <16 x i8> @vp_ctpop_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i8: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_ctpop_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i16: ; CHECK: # %bb.0: @@ -238,8 +228,6 @@ define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_ctpop_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i16: ; CHECK: # %bb.0: @@ -296,8 +284,6 @@ define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_ctpop_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_ctpop_v8i16: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_ctpop_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i16: ; CHECK: # %bb.0: @@ -412,8 +396,6 @@ define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i32> @vp_ctpop_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i32: ; CHECK: # %bb.0: @@ -472,8 +454,6 @@ define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_ctpop_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i32: ; CHECK: # %bb.0: @@ -532,8 +512,6 @@ define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_ctpop_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v8i32: ; CHECK: # %bb.0: @@ -592,8 +570,6 @@ define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_ctpop_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i32: ; CHECK: # %bb.0: @@ -652,8 +628,6 @@ define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_ctpop_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v2i64: ; RV32: # %bb.0: @@ -806,8 +780,6 @@ define <2 x i64> 
@vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_ctpop_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v4i64: ; RV32: # %bb.0: @@ -960,8 +932,6 @@ define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_ctpop_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v8i64: ; RV32: # %bb.0: @@ -1114,8 +1084,6 @@ define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v15i64: ; RV32: # %bb.0: @@ -1268,8 +1236,6 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v16i64: ; RV32: # %bb.0: @@ -1422,8 +1388,6 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll index 44b9331fd2caf..f7835cbbfafa5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -37,7 +37,6 @@ define void @ctpop_v16i8(ptr %x, ptr %y) { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) define void 
@ctpop_v8i16(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v8i16: @@ -79,7 +78,6 @@ define void @ctpop_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) define void @ctpop_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v4i32: @@ -122,7 +120,6 @@ define void @ctpop_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) define void @ctpop_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: ctpop_v2i64: @@ -214,7 +211,6 @@ define void @ctpop_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) define void @ctpop_v32i8(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v32i8: @@ -251,7 +247,6 @@ define void @ctpop_v32i8(ptr %x, ptr %y) { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>) define void @ctpop_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v16i16: @@ -293,7 +288,6 @@ define void @ctpop_v16i16(ptr %x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>) define void @ctpop_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v8i32: @@ -428,7 +422,6 @@ define <8 x i1> @ctpop_v8i32_ne_one(ptr %x, ptr %y) { %cmp = icmp ne <8 x i32> %c, ret <8 x i1> %cmp } -declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) define void @ctpop_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: ctpop_v4i64: @@ -612,4 +605,3 @@ define <4 x i1> @ctpop_v4i64_ne_one(ptr %x, ptr %y) { %cmp = icmp ne <4 x i64> %c, ret <4 x i1> %cmp } -declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll index 1922006b8a581..098384d200045 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_cttz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i8: ; CHECK: # %bb.0: @@ -54,8 +52,6 @@ define <2 x i8> @vp_cttz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_cttz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i8: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define <4 x i8> @vp_cttz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_cttz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i8: ; CHECK: # %bb.0: @@ -154,8 +148,6 @@ define <8 x i8> @vp_cttz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_cttz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i8: ; CHECK: # %bb.0: @@ -204,8 +196,6 @@ define <16 x i8> @vp_cttz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_cttz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i16: ; CHECK: # %bb.0: @@ -268,8 +258,6 @@ define <2 x i16> @vp_cttz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_cttz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i16: ; CHECK: # %bb.0: @@ -332,8 +320,6 @@ define <4 x i16> @vp_cttz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - 
define <8 x i16> @vp_cttz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i16: ; CHECK: # %bb.0: @@ -396,8 +382,6 @@ define <8 x i16> @vp_cttz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_cttz_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i16: ; CHECK: # %bb.0: @@ -460,8 +444,6 @@ define <16 x i16> @vp_cttz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_cttz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i32: ; CHECK: # %bb.0: @@ -526,8 +508,6 @@ define <2 x i32> @vp_cttz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_cttz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i32: ; CHECK: # %bb.0: @@ -592,8 +572,6 @@ define <4 x i32> @vp_cttz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_cttz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i32: ; CHECK: # %bb.0: @@ -658,8 +636,6 @@ define <8 x i32> @vp_cttz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_cttz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i32: ; CHECK: # %bb.0: @@ -724,8 +700,6 @@ define <16 x i32> @vp_cttz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_cttz_v2i64(<2 x i64> %va, 
<2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v2i64: ; RV32: # %bb.0: @@ -890,8 +864,6 @@ define <2 x i64> @vp_cttz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_cttz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v4i64: ; RV32: # %bb.0: @@ -1056,8 +1028,6 @@ define <4 x i64> @vp_cttz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_cttz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v8i64: ; RV32: # %bb.0: @@ -1222,8 +1192,6 @@ define <8 x i64> @vp_cttz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v15i64: ; RV32: # %bb.0: @@ -1388,8 +1356,6 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v16i64: ; RV32: # %bb.0: @@ -1554,8 +1520,6 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll index 307b143f4449f..ad51cab1ba8d2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -82,7 
+82,6 @@ define void @cttz_v16i8(ptr %x, ptr %y) nounwind { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1) define void @cttz_v8i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v8i16: @@ -159,7 +158,6 @@ define void @cttz_v8i16(ptr %x, ptr %y) nounwind { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1) define void @cttz_v4i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v4i32: @@ -240,7 +238,6 @@ define void @cttz_v4i32(ptr %x, ptr %y) nounwind { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) define void @cttz_v2i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: cttz_v2i64: @@ -379,7 +376,6 @@ define void @cttz_v2i64(ptr %x, ptr %y) nounwind { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) define void @cttz_v32i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v32i8: @@ -459,7 +455,6 @@ define void @cttz_v32i8(ptr %x, ptr %y) nounwind { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>, i1) define void @cttz_v16i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v16i16: @@ -536,7 +531,6 @@ define void @cttz_v16i16(ptr %x, ptr %y) nounwind { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1) define void @cttz_v8i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v8i32: @@ -617,7 +611,6 @@ define void @cttz_v8i32(ptr %x, ptr %y) nounwind { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1) define void @cttz_v4i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: cttz_v4i64: @@ -756,7 +749,6 @@ define void @cttz_v4i64(ptr %x, ptr %y) nounwind { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1) define void @cttz_zero_undef_v16i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_zero_undef_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll index fa311154fa973..21b4b81651966 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV32 -declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>) define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f16: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) ret <1 x half>%res } -declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>) define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f16: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) ret <2 x half>%res } -declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>) define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f16: ; CHECK: # %bb.0: @@ -52,7 +49,6 @@ define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) ret <4 x half>%res } -declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>) define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f16: ; CHECK: # %bb.0: @@ -68,7 +64,6 @@ define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) ret <8 x half>%res } -declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>) define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f32: ; CHECK: # %bb.0: @@ -84,7 +79,6 @@ define <1 x float> @expandload_v1f32(ptr %base, <1 x 
float> %src0, <1 x i1> %mas ret <1 x float>%res } -declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>) define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f32: ; CHECK: # %bb.0: @@ -100,7 +94,6 @@ define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mas ret <2 x float>%res } -declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>) define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f32: ; CHECK: # %bb.0: @@ -116,7 +109,6 @@ define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mas ret <4 x float>%res } -declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>) define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f32: ; CHECK: # %bb.0: @@ -132,7 +124,6 @@ define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mas ret <8 x float>%res } -declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>) define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f64: ; CHECK: # %bb.0: @@ -148,7 +139,6 @@ define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %m ret <1 x double>%res } -declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>) define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f64: ; CHECK: # %bb.0: @@ -164,7 +154,6 @@ define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %m ret <2 x double>%res } -declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>) define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f64: ; CHECK: # %bb.0: @@ -180,7 +169,6 @@ define <4 x 
double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %m ret <4 x double>%res } -declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>) define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll index 269d3df00f05d..7128f538354aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll @@ -4,7 +4,6 @@ ; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v %s -o - \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64 -declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>) define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i8: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) { ret <1 x i8>%res } -declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>) define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) { ret <2 x i8>%res } -declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>) define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i8: ; CHECK: # %bb.0: @@ -52,7 +49,6 @@ define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) { ret <4 x i8>%res } -declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>) define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i8: ; CHECK: # %bb.0: @@ -68,7 +64,6 @@ define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> 
%mask) { ret <8 x i8>%res } -declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>) define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i16: ; CHECK: # %bb.0: @@ -84,7 +79,6 @@ define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) { ret <1 x i16>%res } -declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>) define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i16: ; CHECK: # %bb.0: @@ -100,7 +94,6 @@ define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) { ret <2 x i16>%res } -declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>) define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i16: ; CHECK: # %bb.0: @@ -116,7 +109,6 @@ define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) { ret <4 x i16>%res } -declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>) define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i16: ; CHECK: # %bb.0: @@ -132,7 +124,6 @@ define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) { ret <8 x i16>%res } -declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>) define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i32: ; CHECK: # %bb.0: @@ -148,7 +139,6 @@ define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) { ret <1 x i32>%res } -declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>) define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i32: ; CHECK: # %bb.0: @@ -164,7 +154,6 @@ define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) { ret <2 x i32>%res } 
-declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>) define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i32: ; CHECK: # %bb.0: @@ -180,7 +169,6 @@ define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) { ret <4 x i32>%res } -declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>) define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i32: ; CHECK: # %bb.0: @@ -196,7 +184,6 @@ define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) { ret <8 x i32>%res } -declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>) define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i64: ; CHECK: # %bb.0: @@ -212,7 +199,6 @@ define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) { ret <1 x i64>%res } -declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>) define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i64: ; CHECK: # %bb.0: @@ -228,7 +214,6 @@ define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) { ret <2 x i64>%res } -declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>) define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i64: ; CHECK: # %bb.0: @@ -244,7 +229,6 @@ define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) { ret <4 x i64>%res } -declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>) define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll index 
e2711a0231509..3263539d5c20d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -176,7 +176,6 @@ define void @extract_v2i32_nxv16i32_0( %x, ptr %y) { ret void } - define void @extract_v2i32_nxv16i32_2( %x, ptr %y) { ; CHECK-LABEL: extract_v2i32_nxv16i32_2: ; CHECK: # %bb.0: @@ -834,25 +833,3 @@ define void @extract_v2f16_v4f16_2(ptr %x, ptr %y) { ret void } -declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1( %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1( %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %vec, i64 %idx) - -declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx) -declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx) - -declare <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx) - -declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %vec, i64 %idx) - -declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %vec, i64 %idx) -declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll index 71b0624d91f22..22aef4899a6c2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll @@ -26,7 +26,6 @@ define <1 x half> @ceil_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> 
@llvm.experimental.constrained.ceil.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.ceil.v1f16(<1 x half>, metadata) define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: ceil_v2f16: @@ -50,7 +49,6 @@ define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half>, metadata) define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: ceil_v4f16: @@ -74,7 +72,6 @@ define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half>, metadata) define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: ceil_v8f16: @@ -98,7 +95,6 @@ define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half>, metadata) define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: ceil_v16f16: @@ -122,7 +118,6 @@ define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata) define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: ceil_v32f16: @@ -147,7 +142,6 @@ define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> 
@llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata) define <1 x float> @ceil_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: ceil_v1f32: @@ -170,7 +164,6 @@ define <1 x float> @ceil_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata) define <2 x float> @ceil_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: ceil_v2f32: @@ -193,7 +186,6 @@ define <2 x float> @ceil_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float>, metadata) define <4 x float> @ceil_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: ceil_v4f32: @@ -216,7 +208,6 @@ define <4 x float> @ceil_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata) define <8 x float> @ceil_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: ceil_v8f32: @@ -239,7 +230,6 @@ define <8 x float> @ceil_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata) define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: ceil_v16f32: @@ -262,7 +252,6 @@ define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata) define <1 x double> 
@ceil_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: ceil_v1f64: @@ -304,7 +293,6 @@ define <1 x double> @ceil_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata) define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: ceil_v2f64: @@ -346,7 +334,6 @@ define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: ceil_v4f64: @@ -388,7 +375,6 @@ define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: ceil_v8f64: @@ -430,4 +416,3 @@ define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll index 9eca66eea865c..511382cf5436e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll @@ -26,7 +26,6 @@ define <1 x half> @floor_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> 
@llvm.experimental.constrained.floor.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half>, metadata) define <2 x half> @floor_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: floor_v2f16: @@ -50,7 +49,6 @@ define <2 x half> @floor_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half>, metadata) define <4 x half> @floor_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: floor_v4f16: @@ -74,7 +72,6 @@ define <4 x half> @floor_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half>, metadata) define <8 x half> @floor_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: floor_v8f16: @@ -98,7 +95,6 @@ define <8 x half> @floor_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half>, metadata) define <16 x half> @floor_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: floor_v16f16: @@ -122,7 +118,6 @@ define <16 x half> @floor_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata) define <32 x half> @floor_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: floor_v32f16: @@ -147,7 +142,6 @@ define <32 x half> @floor_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a 
} -declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata) define <1 x float> @floor_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: floor_v1f32: @@ -170,7 +164,6 @@ define <1 x float> @floor_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata) define <2 x float> @floor_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: floor_v2f32: @@ -193,7 +186,6 @@ define <2 x float> @floor_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float>, metadata) define <4 x float> @floor_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: floor_v4f32: @@ -216,7 +208,6 @@ define <4 x float> @floor_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata) define <8 x float> @floor_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: floor_v8f32: @@ -239,7 +230,6 @@ define <8 x float> @floor_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata) define <16 x float> @floor_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: floor_v16f32: @@ -262,7 +252,6 @@ define <16 x float> @floor_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> 
@llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata) define <1 x double> @floor_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: floor_v1f64: @@ -304,7 +293,6 @@ define <1 x double> @floor_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata) define <2 x double> @floor_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: floor_v2f64: @@ -346,7 +334,6 @@ define <2 x double> @floor_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) define <4 x double> @floor_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: floor_v4f64: @@ -388,7 +375,6 @@ define <4 x double> @floor_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) define <8 x double> @floor_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: floor_v8f64: @@ -430,4 +416,3 @@ define <8 x double> @floor_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll index 4494b97119403..76f5f0a32bd1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v 
-target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.floor.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.floor.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.floor.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.floor.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x 
float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.floor.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x 
double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll index 2bf039bd0104a..da6e2fae93687 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v2f16: ; ZVFH: # %bb.0: @@ -76,8 +74,6 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v4f16: ; ZVFH: # %bb.0: @@ -144,8 +140,6 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 
zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v8f16: ; ZVFH: # %bb.0: @@ -214,8 +208,6 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v16f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32: ; CHECK: # %bb.0: @@ -319,8 +309,6 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32: ; CHECK: # %bb.0: @@ -387,8 +373,6 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32: ; CHECK: # %bb.0: @@ -422,8 +406,6 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } 
-declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64: ; CHECK: # %bb.0: @@ -455,8 +437,6 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64: ; CHECK: # %bb.0: @@ -490,8 +470,6 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64: ; CHECK: # %bb.0: @@ -525,8 +503,6 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <16 x double> @llvm.vp.maximum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f64: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.maximum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll index e2cbdd3911ad5..6ee2e204bcfe3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.maximum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmax_v2f16_vv: ; ZVFH: # %bb.0: @@ -41,8 +39,6 @@ define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.maximum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmax_v4f16_vv: ; ZVFH: # %bb.0: @@ -74,8 +70,6 @@ define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ret <4 x half> %v } -declare <8 x half> @llvm.maximum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmax_v8f16_vv: ; ZVFH: # %bb.0: @@ -107,8 +101,6 @@ define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ret <8 x half> %v } -declare <16 x half> @llvm.maximum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmax_v16f16_vv: ; ZVFH: # %bb.0: @@ -140,8 +132,6 @@ define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ret <16 x half> %v } -declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmax_v2f32_vv: ; CHECK: # %bb.0: @@ -156,8 +146,6 @@ define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ret <2 x float> %v } -declare <4 x float> @llvm.maximum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmax_v4f32_vv: ; CHECK: # %bb.0: @@ -172,8 +160,6 @@ define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ret <4 x float> %v } -declare <8 x float> 
@llvm.maximum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmax_v8f32_vv: ; CHECK: # %bb.0: @@ -188,8 +174,6 @@ define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ret <8 x float> %v } -declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmax_v16f32_vv: ; CHECK: # %bb.0: @@ -204,8 +188,6 @@ define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ret <16 x float> %v } -declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmax_v2f64_vv: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ret <2 x double> %v } -declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmax_v4f64_vv: ; CHECK: # %bb.0: @@ -236,8 +216,6 @@ define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ret <4 x double> %v } -declare <8 x double> @llvm.maximum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmax_v8f64_vv: ; CHECK: # %bb.0: @@ -252,8 +230,6 @@ define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ret <8 x double> %v } -declare <16 x double> @llvm.maximum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwind { ; CHECK-LABEL: vfmax_v16f64_vv: ; CHECK: # %bb.0: @@ -358,8 +334,6 @@ define <2 x half> @vfmax_v2f16_vv_nnanb(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.vector.insert.v2f32.v4f32(<4 x half>, <2 x half>, i64) - define <4 x half> @vfmax_v2f16_vv_nnan_insert_subvector(<2 x half> %a, <2 x half> %b, 
<4 x half> %c) { ; ZVFH-LABEL: vfmax_v2f16_vv_nnan_insert_subvector: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll index 73d83e86af4c6..e179970199171 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v2f16: ; ZVFH: # %bb.0: @@ -76,8 +74,6 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v4f16: ; ZVFH: # %bb.0: @@ -144,8 +140,6 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v8f16: ; ZVFH: # %bb.0: @@ -214,8 +208,6 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v16f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> 
@llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f32: ; CHECK: # %bb.0: @@ -319,8 +309,6 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32: ; CHECK: # %bb.0: @@ -387,8 +373,6 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32: ; CHECK: # %bb.0: @@ -422,8 +406,6 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64: ; CHECK: # %bb.0: @@ -455,8 +437,6 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfmin_vv_v4f64: ; CHECK: # %bb.0: @@ -490,8 +470,6 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64: ; CHECK: # %bb.0: @@ -525,8 +503,6 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <16 x double> @llvm.vp.minimum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f64: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.minimum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll index 9c75af359a4cb..a95177a1de9a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.minimum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmin_v2f16_vv: ; ZVFH: # %bb.0: @@ -41,8 +39,6 @@ define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.minimum.v4f16(<4 x half>, <4 x half>) - define <4 x half> 
@vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmin_v4f16_vv: ; ZVFH: # %bb.0: @@ -74,8 +70,6 @@ define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ret <4 x half> %v } -declare <8 x half> @llvm.minimum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmin_v8f16_vv: ; ZVFH: # %bb.0: @@ -107,8 +101,6 @@ define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ret <8 x half> %v } -declare <16 x half> @llvm.minimum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmin_v16f16_vv: ; ZVFH: # %bb.0: @@ -140,8 +132,6 @@ define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ret <16 x half> %v } -declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmin_v2f32_vv: ; CHECK: # %bb.0: @@ -156,8 +146,6 @@ define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ret <2 x float> %v } -declare <4 x float> @llvm.minimum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmin_v4f32_vv: ; CHECK: # %bb.0: @@ -172,8 +160,6 @@ define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ret <4 x float> %v } -declare <8 x float> @llvm.minimum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmin_v8f32_vv: ; CHECK: # %bb.0: @@ -188,8 +174,6 @@ define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ret <8 x float> %v } -declare <16 x float> @llvm.minimum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmin_v16f32_vv: ; CHECK: # %bb.0: @@ -204,8 +188,6 @@ define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ret <16 x float> %v } -declare <2 x 
double> @llvm.minimum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmin_v2f64_vv: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ret <2 x double> %v } -declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmin_v4f64_vv: ; CHECK: # %bb.0: @@ -236,8 +216,6 @@ define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ret <4 x double> %v } -declare <8 x double> @llvm.minimum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmin_v8f64_vv: ; CHECK: # %bb.0: @@ -252,8 +230,6 @@ define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ret <8 x double> %v } -declare <16 x double> @llvm.minimum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwind { ; CHECK-LABEL: vfmin_v16f64_vv: ; CHECK: # %bb.0: @@ -358,8 +334,6 @@ define <2 x half> @vfmin_v2f16_vv_nnanb(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.vector.insert.v2f32.v4f32(<4 x half>, <2 x half>, i64) - define <4 x half> @vfmin_v2f16_vv_nnan_insert_subvector(<2 x half> %a, <2 x half> %b, <4 x half> %c) { ; ZVFH-LABEL: vfmin_v2f16_vv_nnan_insert_subvector: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll index dd1b99bee6d55..8485eb8ac1caa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck 
--check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata) - define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v2f16: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp { ret <2 x half> %r } -declare <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half>, metadata, metadata) - define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v4f16: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp { ret <4 x half> %r } -declare <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half>, metadata, metadata) - define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v8f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp { ret <8 x half> %r } -declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata) - define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v16f16: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp { ret <16 x half> %r } -declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata) - define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v32f16: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp { ret <32 x half> %r } -declare <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float>, metadata, metadata) - define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v2f32: ; CHECK: # %bb.0: @@ -154,8 +142,6 @@ define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp { ret <2 x float> %r } -declare <4 x float> 
@llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata) - define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v4f32: ; CHECK: # %bb.0: @@ -178,8 +164,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp { ret <4 x float> %r } -declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata) - define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v8f32: ; CHECK: # %bb.0: @@ -202,8 +186,6 @@ define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp { ret <8 x float> %r } -declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata) - define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v16f32: ; CHECK: # %bb.0: @@ -226,8 +208,6 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp { ret <16 x float> %r } -declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata) - define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v2f64: ; RV32: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { ret <2 x double> %r } -declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata) - define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v4f64: ; RV32: # %bb.0: @@ -312,8 +290,6 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { ret <4 x double> %r } -declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata) - define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll index 1f9a8bf8133c8..0788d0a719e11 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll @@ -17,7 +17,6 @@ define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float>) define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i32: @@ -34,7 +33,6 @@ define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>) define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) { ; @@ -52,7 +50,6 @@ define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) { store <8 x i32> %d, ptr %y ret void } -declare <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float>) define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) { ; @@ -70,7 +67,6 @@ define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) { store <8 x i32> %d, ptr %y ret void } -declare <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float>) define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f32_v2i64: @@ -88,7 +84,6 @@ define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float>) define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i64: @@ -106,7 +101,6 @@ define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float>) define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) { ; @@ -125,7 +119,6 @@ define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) { store <8 x i64> %d, ptr %y ret void } -declare <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float>) define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) { ; @@ -144,7 +137,6 @@ define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) { store <8 x i64> %d, ptr %y ret void } -declare <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float>) define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f16_v2i64: @@ -164,7 
+156,6 @@ define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half>) define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f16_v2i64: @@ -184,7 +175,6 @@ define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half>) define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f64_v2i8: @@ -206,7 +196,6 @@ define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) { store <2 x i8> %d, ptr %y ret void } -declare <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double>) define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i8: @@ -228,7 +217,6 @@ define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) { store <2 x i8> %d, ptr %y ret void } -declare <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double>) define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) { ; @@ -251,7 +239,6 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) { store <8 x i8> %d, ptr %y ret void } -declare <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double>) define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) { ; @@ -274,7 +261,6 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) { store <8 x i8> %d, ptr %y ret void } -declare <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> %a) define void @fp2si_v2f64_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f64_v2i32: @@ -292,7 +278,6 @@ define void @fp2si_v2f64_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double>) define void @fp2ui_v2f64_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i32: @@ -310,7 +295,6 @@ define void @fp2ui_v2f64_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double>) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll index adb9016b30d23..465b166826a37 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl) ret <2 x float> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl) ret <2 x double> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f32_v2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl ret <2 x double> %v } -declare <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float>, <15 x i1>, i32) - define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v15f32_v15f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 ze ret <15 x double> %v } -declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x 
float>, <32 x i1>, i32) - define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v32f32_v32f64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 ze ret <32 x double> %v } -declare <2 x float> @llvm.vp.fpext.v2f32.v2bf16(<2 x bfloat>, <2 x i1>, i32) - define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <2 x float> @vfpext_v2bf16_v2f32_unmasked(<2 x bfloat> %a, i32 zeroext %v ret <2 x float> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2bf16(<2 x bfloat>, <2 x i1>, i32) - define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2bf16_v2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll index 71d6af6ea34c2..4b05de75dbfe3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll @@ -34,7 +34,6 @@ define <1 x float> @powi_v1f32(<1 x float> %x, i32 %y) nounwind { %a = call <1 x float> @llvm.powi.v1f32.i32(<1 x float> %x, i32 %y) ret <1 x float> %a } -declare <1 x float> @llvm.powi.v1f32.i32(<1 x float>, i32) define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v2f32: @@ -100,7 +99,6 @@ define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) nounwind { %a = call <2 x float> @llvm.powi.v2f32.i32(<2 x float> %x, i32 %y) ret <2 x float> %a } -declare <2 x float> @llvm.powi.v2f32.i32(<2 x float>, i32) define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v3f32: @@ -208,7 +206,6 @@ define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) nounwind { %a = call <3 x float> @llvm.powi.v3f32.i32(<3 x float> %x, i32 %y) ret <3 x float> %a } -declare <3 x float> 
@llvm.powi.v3f32.i32(<3 x float>, i32) define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v4f32: @@ -340,7 +337,6 @@ define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) nounwind { %a = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %y) ret <4 x float> %a } -declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32) define <8 x float> @powi_v8f32(<8 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v8f32: @@ -602,7 +598,6 @@ define <8 x float> @powi_v8f32(<8 x float> %x, i32 %y) nounwind { %a = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %x, i32 %y) ret <8 x float> %a } -declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32) define <16 x float> @powi_v16f32(<16 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v16f32: @@ -851,7 +846,6 @@ define <16 x float> @powi_v16f32(<16 x float> %x, i32 %y) nounwind { %a = call <16 x float> @llvm.powi.v16f32.i32(<16 x float> %x, i32 %y) ret <16 x float> %a } -declare <16 x float> @llvm.powi.v16f32.i32(<16 x float>, i32) define <1 x double> @powi_v1f64(<1 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v1f64: @@ -883,7 +877,6 @@ define <1 x double> @powi_v1f64(<1 x double> %x, i32 %y) nounwind { %a = call <1 x double> @llvm.powi.v1f64.i32(<1 x double> %x, i32 %y) ret <1 x double> %a } -declare <1 x double> @llvm.powi.v1f64.i32(<1 x double>, i32) define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v2f64: @@ -949,7 +942,6 @@ define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) nounwind { %a = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> %x, i32 %y) ret <2 x double> %a } -declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32) define <4 x double> @powi_v4f64(<4 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v4f64: @@ -1095,7 +1087,6 @@ define <4 x double> @powi_v4f64(<4 x double> %x, i32 %y) nounwind { %a = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %x, i32 %y) ret <4 x double> %a } -declare <4 x double> 
@llvm.powi.v4f64.i32(<4 x double>, i32) define <8 x double> @powi_v8f64(<8 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v8f64: @@ -1248,4 +1239,3 @@ define <8 x double> @powi_v8f64(<8 x double> %x, i32 %y) nounwind { %a = call <8 x double> @llvm.powi.v8f64.i32(<8 x double> %x, i32 %y) ret <8 x double> %a } -declare <8 x double> @llvm.powi.v8f64.i32(<8 x double>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll index bc86be6f62fd1..53dbbedc9a055 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i1_v4f16: ; ZVFH: # %bb.0: @@ -46,8 +44,6 @@ define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f32: ; CHECK: # %bb.0: @@ -70,8 +66,6 @@ define <4 x i1> @vfptosi_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll index cbc4c69669b51..96eda109e1c70 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i7> @llvm.vp.fptosi.v4i7.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -27,8 +25,6 @@ define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x i7> %v } -declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i8_v4f16: ; ZVFH: # %bb.0: @@ -71,8 +67,6 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i16_v4f16: ; ZVFH: # %bb.0: @@ -109,8 +103,6 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i32_v4f16: ; ZVFH: # %bb.0: @@ -149,8 +141,6 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i64_v4f16: ; ZVFH: # %bb.0: @@ -191,8 +181,6 @@ define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i64> %v } 
-declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f32: ; CHECK: # %bb.0: @@ -217,8 +205,6 @@ define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f32: ; CHECK: # %bb.0: @@ -241,8 +227,6 @@ define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f32: ; CHECK: # %bb.0: @@ -263,8 +247,6 @@ define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f32: ; CHECK: # %bb.0: @@ -287,8 +269,6 @@ define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f64: ; CHECK: # %bb.0: @@ -317,8 +297,6 @@ define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f64: ; CHECK: # %bb.0: @@ -343,8 +321,6 @@ define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 
zeroext %ev ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f64: ; CHECK: # %bb.0: @@ -367,8 +343,6 @@ define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f64: ; CHECK: # %bb.0: @@ -389,8 +363,6 @@ define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v32i64_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll index c41f14076db31..55fccb5db1aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i1_v4f16: ; ZVFH: # %bb.0: @@ -46,8 +44,6 @@ define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f32: ; 
CHECK: # %bb.0: @@ -70,8 +66,6 @@ define <4 x i1> @vfptoui_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll index 5dd3e0372f401..4020100bf364b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i7> @llvm.vp.fptoui.v4i7.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -27,8 +25,6 @@ define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x i7> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i8_v4f16: ; ZVFH: # %bb.0: @@ -71,8 +67,6 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i16_v4f16: ; ZVFH: # %bb.0: @@ -109,8 +103,6 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext 
%evl) { ; ZVFH-LABEL: vfptoui_v4i32_v4f16: ; ZVFH: # %bb.0: @@ -149,8 +141,6 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i64_v4f16: ; ZVFH: # %bb.0: @@ -191,8 +181,6 @@ define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f32: ; CHECK: # %bb.0: @@ -217,8 +205,6 @@ define <4 x i8> @vfptoui_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f32: ; CHECK: # %bb.0: @@ -241,8 +227,6 @@ define <4 x i16> @vfptoui_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f32: ; CHECK: # %bb.0: @@ -263,8 +247,6 @@ define <4 x i32> @vfptoui_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f32: ; CHECK: # %bb.0: @@ -287,8 +269,6 @@ define <4 x i64> @vfptoui_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f64(<4 x double> %va, <4 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f64: ; CHECK: # %bb.0: @@ -317,8 +297,6 @@ define <4 x i8> @vfptoui_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f64: ; CHECK: # %bb.0: @@ -343,8 +321,6 @@ define <4 x i16> @vfptoui_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f64: ; CHECK: # %bb.0: @@ -367,8 +343,6 @@ define <4 x i32> @vfptoui_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f64: ; CHECK: # %bb.0: @@ -389,8 +363,6 @@ define <4 x i64> @vfptoui_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v32i64_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll index 13891cb84e0f2..e509722b623a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s - -declare <2 x half> 
@llvm.vp.fptrunc.v2f16.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x half> @vfptrunc_v2f16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f32: ; CHECK: # %bb.0: @@ -29,8 +26,6 @@ define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl ret <2 x half> %v } -declare <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x half> @vfptrunc_v2f16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f64: ; CHECK: # %bb.0: @@ -55,8 +50,6 @@ define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %v ret <2 x half> %v } -declare <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double>, <2 x i1>, i32) - define <2 x float> @vfptrunc_v2f32_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f32_v2f64: ; CHECK: # %bb.0: @@ -79,8 +72,6 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext % ret <2 x float> %v } -declare <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double>, <15 x i1>, i32) - define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v15f32_v15f64: ; CHECK: # %bb.0: @@ -92,8 +83,6 @@ define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 ret <15 x float> %v } -declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i32) - define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v32f32_v32f64: ; CHECK: # %bb.0: @@ -123,8 +112,6 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 ret <32 x float> %v } -declare <2 x bfloat> @llvm.vp.fptrunc.v2bf16.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x bfloat> @vfptrunc_v2bf16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -147,8 +134,6 @@ define <2 x bfloat> 
@vfptrunc_v2bf16_v2f32_unmasked(<2 x float> %a, i32 zeroext ret <2 x bfloat> %v } -declare <2 x bfloat> @llvm.vp.fptrunc.v2bf16.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x bfloat> @vfptrunc_v2bf16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2bf16_v2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll index c0b67dd603ebb..ad56aee72a432 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll @@ -28,7 +28,6 @@ define <1 x half> @round_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half>, metadata) define <2 x half> @round_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: round_v2f16: @@ -52,7 +51,6 @@ define <2 x half> @round_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half>, metadata) define <4 x half> @round_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: round_v4f16: @@ -76,7 +74,6 @@ define <4 x half> @round_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half>, metadata) define <8 x half> @round_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: round_v8f16: @@ -100,7 +97,6 @@ define <8 x half> @round_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare 
<8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half>, metadata) define <16 x half> @round_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: round_v16f16: @@ -124,7 +120,6 @@ define <16 x half> @round_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half>, metadata) define <32 x half> @round_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: round_v32f16: @@ -149,7 +144,6 @@ define <32 x half> @round_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half>, metadata) define <1 x float> @round_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: round_v1f32: @@ -172,7 +166,6 @@ define <1 x float> @round_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata) define <2 x float> @round_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: round_v2f32: @@ -195,7 +188,6 @@ define <2 x float> @round_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float>, metadata) define <4 x float> @round_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: round_v4f32: @@ -218,7 +210,6 @@ define <4 x float> @round_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, 
metadata) define <8 x float> @round_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: round_v8f32: @@ -241,7 +232,6 @@ define <8 x float> @round_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float>, metadata) define <16 x float> @round_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: round_v16f32: @@ -264,7 +254,6 @@ define <16 x float> @round_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata) define <1 x double> @round_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: round_v1f64: @@ -306,7 +295,6 @@ define <1 x double> @round_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata) define <2 x double> @round_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: round_v2f64: @@ -348,7 +336,6 @@ define <2 x double> @round_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) define <4 x double> @round_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: round_v4f64: @@ -390,7 +377,6 @@ define <4 x double> @round_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata) define <8 x double> 
@round_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: round_v8f64: @@ -432,4 +418,3 @@ define <8 x double> @round_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll index 455dc0b83c03d..4c6d420767ebf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll @@ -48,7 +48,6 @@ define <1 x half> @round_v1f16(<1 x half> %x) { %a = call <1 x half> @llvm.round.v1f16(<1 x half> %x) ret <1 x half> %a } -declare <1 x half> @llvm.round.v1f16(<1 x half>) define <2 x half> @round_v2f16(<2 x half> %x) { ; ZVFH-LABEL: round_v2f16: @@ -88,7 +87,6 @@ define <2 x half> @round_v2f16(<2 x half> %x) { %a = call <2 x half> @llvm.round.v2f16(<2 x half> %x) ret <2 x half> %a } -declare <2 x half> @llvm.round.v2f16(<2 x half>) define <4 x half> @round_v4f16(<4 x half> %x) { ; ZVFH-LABEL: round_v4f16: @@ -128,7 +126,6 @@ define <4 x half> @round_v4f16(<4 x half> %x) { %a = call <4 x half> @llvm.round.v4f16(<4 x half> %x) ret <4 x half> %a } -declare <4 x half> @llvm.round.v4f16(<4 x half>) define <8 x half> @round_v8f16(<8 x half> %x) { ; ZVFH-LABEL: round_v8f16: @@ -168,7 +165,6 @@ define <8 x half> @round_v8f16(<8 x half> %x) { %a = call <8 x half> @llvm.round.v8f16(<8 x half> %x) ret <8 x half> %a } -declare <8 x half> @llvm.round.v8f16(<8 x half>) define <16 x half> @round_v16f16(<16 x half> %x) { ; ZVFH-LABEL: round_v16f16: @@ -208,7 +204,6 @@ define <16 x half> @round_v16f16(<16 x half> %x) { %a = call <16 x half> @llvm.round.v16f16(<16 x half> %x) ret <16 x half> %a } -declare <16 x half> @llvm.round.v16f16(<16 x half>) define <32 x half> @round_v32f16(<32 x half> %x) { ; ZVFH-LABEL: round_v32f16: 
@@ -250,7 +245,6 @@ define <32 x half> @round_v32f16(<32 x half> %x) { %a = call <32 x half> @llvm.round.v32f16(<32 x half> %x) ret <32 x half> %a } -declare <32 x half> @llvm.round.v32f16(<32 x half>) define <1 x float> @round_v1f32(<1 x float> %x) { ; CHECK-LABEL: round_v1f32: @@ -270,7 +264,6 @@ define <1 x float> @round_v1f32(<1 x float> %x) { %a = call <1 x float> @llvm.round.v1f32(<1 x float> %x) ret <1 x float> %a } -declare <1 x float> @llvm.round.v1f32(<1 x float>) define <2 x float> @round_v2f32(<2 x float> %x) { ; CHECK-LABEL: round_v2f32: @@ -290,7 +283,6 @@ define <2 x float> @round_v2f32(<2 x float> %x) { %a = call <2 x float> @llvm.round.v2f32(<2 x float> %x) ret <2 x float> %a } -declare <2 x float> @llvm.round.v2f32(<2 x float>) define <4 x float> @round_v4f32(<4 x float> %x) { ; CHECK-LABEL: round_v4f32: @@ -310,7 +302,6 @@ define <4 x float> @round_v4f32(<4 x float> %x) { %a = call <4 x float> @llvm.round.v4f32(<4 x float> %x) ret <4 x float> %a } -declare <4 x float> @llvm.round.v4f32(<4 x float>) define <8 x float> @round_v8f32(<8 x float> %x) { ; CHECK-LABEL: round_v8f32: @@ -330,7 +321,6 @@ define <8 x float> @round_v8f32(<8 x float> %x) { %a = call <8 x float> @llvm.round.v8f32(<8 x float> %x) ret <8 x float> %a } -declare <8 x float> @llvm.round.v8f32(<8 x float>) define <16 x float> @round_v16f32(<16 x float> %x) { ; CHECK-LABEL: round_v16f32: @@ -350,7 +340,6 @@ define <16 x float> @round_v16f32(<16 x float> %x) { %a = call <16 x float> @llvm.round.v16f32(<16 x float> %x) ret <16 x float> %a } -declare <16 x float> @llvm.round.v16f32(<16 x float>) define <1 x double> @round_v1f64(<1 x double> %x) { ; RV32ZVFH-LABEL: round_v1f64: @@ -417,7 +406,6 @@ define <1 x double> @round_v1f64(<1 x double> %x) { %a = call <1 x double> @llvm.round.v1f64(<1 x double> %x) ret <1 x double> %a } -declare <1 x double> @llvm.round.v1f64(<1 x double>) define <2 x double> @round_v2f64(<2 x double> %x) { ; RV32ZVFH-LABEL: round_v2f64: @@ -484,7 +472,6 @@ define 
<2 x double> @round_v2f64(<2 x double> %x) { %a = call <2 x double> @llvm.round.v2f64(<2 x double> %x) ret <2 x double> %a } -declare <2 x double> @llvm.round.v2f64(<2 x double>) define <4 x double> @round_v4f64(<4 x double> %x) { ; RV32ZVFH-LABEL: round_v4f64: @@ -551,7 +538,6 @@ define <4 x double> @round_v4f64(<4 x double> %x) { %a = call <4 x double> @llvm.round.v4f64(<4 x double> %x) ret <4 x double> %a } -declare <4 x double> @llvm.round.v4f64(<4 x double>) define <8 x double> @round_v8f64(<8 x double> %x) { ; RV32ZVFH-LABEL: round_v8f64: @@ -618,4 +604,3 @@ define <8 x double> @round_v8f64(<8 x double> %x) { %a = call <8 x double> @llvm.round.v8f64(<8 x double> %x) ret <8 x double> %a } -declare <8 x double> @llvm.round.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll index b1d35d3bcdc1d..5e5c64fd891fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll @@ -28,7 +28,6 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half>, metadata) define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v2f16: @@ -52,7 +51,6 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half>, metadata) define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v4f16: @@ -76,7 +74,6 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp { %a = 
call <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half>, metadata) define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v8f16: @@ -100,7 +97,6 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half>, metadata) define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v16f16: @@ -124,7 +120,6 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half>, metadata) define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v32f16: @@ -149,7 +144,6 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half>, metadata) define <1 x float> @roundeven_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v1f32: @@ -172,7 +166,6 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float>, metadata) define <2 x float> @roundeven_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v2f32: @@ -195,7 +188,6 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) 
strictfp { %a = call <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float>, metadata) define <4 x float> @roundeven_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v4f32: @@ -218,7 +210,6 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata) define <8 x float> @roundeven_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v8f32: @@ -241,7 +232,6 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float>, metadata) define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v16f32: @@ -264,7 +254,6 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata) define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: roundeven_v1f64: @@ -306,7 +295,6 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata) define <2 x double> @roundeven_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: roundeven_v2f64: @@ -348,7 +336,6 @@ define 
<2 x double> @roundeven_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata) define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: roundeven_v4f64: @@ -390,7 +377,6 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata) define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: roundeven_v8f64: @@ -432,4 +418,3 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll index f8b3cb5897dfa..b175549c132b4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll @@ -48,7 +48,6 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) { %a = call <1 x half> @llvm.roundeven.v1f16(<1 x half> %x) ret <1 x half> %a } -declare <1 x half> @llvm.roundeven.v1f16(<1 x half>) define <2 x half> @roundeven_v2f16(<2 x half> %x) { ; ZVFH-LABEL: roundeven_v2f16: @@ -88,7 +87,6 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) { %a = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x) ret <2 x half> %a } -declare <2 x half> @llvm.roundeven.v2f16(<2 x half>) define <4 x half> @roundeven_v4f16(<4 x half> %x) { ; ZVFH-LABEL: roundeven_v4f16: @@ -128,7 +126,6 @@ 
define <4 x half> @roundeven_v4f16(<4 x half> %x) { %a = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x) ret <4 x half> %a } -declare <4 x half> @llvm.roundeven.v4f16(<4 x half>) define <8 x half> @roundeven_v8f16(<8 x half> %x) { ; ZVFH-LABEL: roundeven_v8f16: @@ -168,7 +165,6 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) { %a = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %x) ret <8 x half> %a } -declare <8 x half> @llvm.roundeven.v8f16(<8 x half>) define <16 x half> @roundeven_v16f16(<16 x half> %x) { ; ZVFH-LABEL: roundeven_v16f16: @@ -208,7 +204,6 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) { %a = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %x) ret <16 x half> %a } -declare <16 x half> @llvm.roundeven.v16f16(<16 x half>) define <32 x half> @roundeven_v32f16(<32 x half> %x) { ; ZVFH-LABEL: roundeven_v32f16: @@ -250,7 +245,6 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) { %a = call <32 x half> @llvm.roundeven.v32f16(<32 x half> %x) ret <32 x half> %a } -declare <32 x half> @llvm.roundeven.v32f16(<32 x half>) define <1 x float> @roundeven_v1f32(<1 x float> %x) { ; CHECK-LABEL: roundeven_v1f32: @@ -270,7 +264,6 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) { %a = call <1 x float> @llvm.roundeven.v1f32(<1 x float> %x) ret <1 x float> %a } -declare <1 x float> @llvm.roundeven.v1f32(<1 x float>) define <2 x float> @roundeven_v2f32(<2 x float> %x) { ; CHECK-LABEL: roundeven_v2f32: @@ -290,7 +283,6 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) { %a = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x) ret <2 x float> %a } -declare <2 x float> @llvm.roundeven.v2f32(<2 x float>) define <4 x float> @roundeven_v4f32(<4 x float> %x) { ; CHECK-LABEL: roundeven_v4f32: @@ -310,7 +302,6 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) { %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x) ret <4 x float> %a } -declare <4 x float> @llvm.roundeven.v4f32(<4 x float>) define <8 x float> 
@roundeven_v8f32(<8 x float> %x) { ; CHECK-LABEL: roundeven_v8f32: @@ -330,7 +321,6 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) { %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x) ret <8 x float> %a } -declare <8 x float> @llvm.roundeven.v8f32(<8 x float>) define <16 x float> @roundeven_v16f32(<16 x float> %x) { ; CHECK-LABEL: roundeven_v16f32: @@ -350,7 +340,6 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) { %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x) ret <16 x float> %a } -declare <16 x float> @llvm.roundeven.v16f32(<16 x float>) define <1 x double> @roundeven_v1f64(<1 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v1f64: @@ -417,7 +406,6 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) { %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x) ret <1 x double> %a } -declare <1 x double> @llvm.roundeven.v1f64(<1 x double>) define <2 x double> @roundeven_v2f64(<2 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v2f64: @@ -484,7 +472,6 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) { %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x) ret <2 x double> %a } -declare <2 x double> @llvm.roundeven.v2f64(<2 x double>) define <4 x double> @roundeven_v4f64(<4 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v4f64: @@ -551,7 +538,6 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) { %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x) ret <4 x double> %a } -declare <4 x double> @llvm.roundeven.v4f64(<4 x double>) define <8 x double> @roundeven_v8f64(<8 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v8f64: @@ -618,4 +604,3 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) { %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x) ret <8 x double> %a } -declare <8 x double> @llvm.roundeven.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll index 81679806f32d8..da04b08aa5db5 100644 
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.vp.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32) define <2 x i8> @fshr_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i8: ; CHECK: # %bb.0: @@ -19,7 +18,6 @@ define <2 x i8> @fshr_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i ret <2 x i8> %res } -declare <2 x i8> @llvm.vp.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32) define <2 x i8> @fshl_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x i8> @fshl_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i ret <2 x i8> %res } -declare <4 x i8> @llvm.vp.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32) define <4 x i8> @fshr_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i8: ; CHECK: # %bb.0: @@ -53,7 +50,6 @@ define <4 x i8> @fshr_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i ret <4 x i8> %res } -declare <4 x i8> @llvm.vp.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32) define <4 x i8> @fshl_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i8: ; CHECK: # %bb.0: @@ -70,7 +66,6 @@ define <4 x i8> @fshl_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i ret <4 x i8> %res } -declare <8 x i8> @llvm.vp.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @fshr_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i8: ; CHECK: # %bb.0: @@ -87,7 +82,6 @@ define <8 x i8> @fshr_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x 
i1> %m, i ret <8 x i8> %res } -declare <8 x i8> @llvm.vp.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @fshl_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i8: ; CHECK: # %bb.0: @@ -104,7 +98,6 @@ define <8 x i8> @fshl_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i ret <8 x i8> %res } -declare <16 x i8> @llvm.vp.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) define <16 x i8> @fshr_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i8: ; CHECK: # %bb.0: @@ -121,7 +114,6 @@ define <16 x i8> @fshr_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> ret <16 x i8> %res } -declare <16 x i8> @llvm.vp.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) define <16 x i8> @fshl_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i8: ; CHECK: # %bb.0: @@ -138,7 +130,6 @@ define <16 x i8> @fshl_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> ret <16 x i8> %res } -declare <32 x i8> @llvm.vp.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) define <32 x i8> @fshr_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i8: ; CHECK: # %bb.0: @@ -155,7 +146,6 @@ define <32 x i8> @fshr_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> ret <32 x i8> %res } -declare <32 x i8> @llvm.vp.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) define <32 x i8> @fshl_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v32i8: ; CHECK: # %bb.0: @@ -172,7 +162,6 @@ define <32 x i8> @fshl_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> ret <32 x i8> %res } -declare <64 x i8> @llvm.vp.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) define <64 x i8> @fshr_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: fshr_v64i8: ; CHECK: # %bb.0: @@ -189,7 +178,6 @@ define <64 x i8> @fshr_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> ret <64 x i8> %res } -declare <64 x i8> @llvm.vp.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) define <64 x i8> @fshl_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v64i8: ; CHECK: # %bb.0: @@ -206,7 +194,6 @@ define <64 x i8> @fshl_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> ret <64 x i8> %res } -declare <2 x i16> @llvm.vp.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) define <2 x i16> @fshr_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i16: ; CHECK: # %bb.0: @@ -223,7 +210,6 @@ define <2 x i16> @fshr_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> ret <2 x i16> %res } -declare <2 x i16> @llvm.vp.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) define <2 x i16> @fshl_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i16: ; CHECK: # %bb.0: @@ -240,7 +226,6 @@ define <2 x i16> @fshl_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> ret <2 x i16> %res } -declare <4 x i16> @llvm.vp.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) define <4 x i16> @fshr_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i16: ; CHECK: # %bb.0: @@ -257,7 +242,6 @@ define <4 x i16> @fshr_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> ret <4 x i16> %res } -declare <4 x i16> @llvm.vp.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) define <4 x i16> @fshl_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i16: ; CHECK: # %bb.0: @@ -274,7 +258,6 @@ define <4 x i16> @fshl_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> ret <4 x i16> %res } -declare <8 x i16> 
@llvm.vp.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) define <8 x i16> @fshr_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i16: ; CHECK: # %bb.0: @@ -291,7 +274,6 @@ define <8 x i16> @fshr_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> ret <8 x i16> %res } -declare <8 x i16> @llvm.vp.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) define <8 x i16> @fshl_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i16: ; CHECK: # %bb.0: @@ -308,7 +290,6 @@ define <8 x i16> @fshl_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> ret <8 x i16> %res } -declare <16 x i16> @llvm.vp.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) define <16 x i16> @fshr_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i16: ; CHECK: # %bb.0: @@ -325,7 +306,6 @@ define <16 x i16> @fshr_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 ret <16 x i16> %res } -declare <16 x i16> @llvm.vp.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) define <16 x i16> @fshl_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i16: ; CHECK: # %bb.0: @@ -342,7 +322,6 @@ define <16 x i16> @fshl_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 ret <16 x i16> %res } -declare <32 x i16> @llvm.vp.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) define <32 x i16> @fshr_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i16: ; CHECK: # %bb.0: @@ -359,7 +338,6 @@ define <32 x i16> @fshr_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 ret <32 x i16> %res } -declare <32 x i16> @llvm.vp.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) define <32 x i16> @fshl_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: fshl_v32i16: ; CHECK: # %bb.0: @@ -376,7 +354,6 @@ define <32 x i16> @fshl_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 ret <32 x i16> %res } -declare <2 x i32> @llvm.vp.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) define <2 x i32> @fshr_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i32: ; CHECK: # %bb.0: @@ -394,7 +371,6 @@ define <2 x i32> @fshr_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> ret <2 x i32> %res } -declare <2 x i32> @llvm.vp.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) define <2 x i32> @fshl_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i32: ; CHECK: # %bb.0: @@ -412,7 +388,6 @@ define <2 x i32> @fshl_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> ret <2 x i32> %res } -declare <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) define <4 x i32> @fshr_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i32: ; CHECK: # %bb.0: @@ -430,7 +405,6 @@ define <4 x i32> @fshr_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> ret <4 x i32> %res } -declare <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) define <4 x i32> @fshl_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i32: ; CHECK: # %bb.0: @@ -448,7 +422,6 @@ define <4 x i32> @fshl_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> ret <4 x i32> %res } -declare <8 x i32> @llvm.vp.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @fshr_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i32: ; CHECK: # %bb.0: @@ -466,7 +439,6 @@ define <8 x i32> @fshr_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> ret <8 x i32> %res } -declare <8 x i32> 
@llvm.vp.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @fshl_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i32: ; CHECK: # %bb.0: @@ -484,7 +456,6 @@ define <8 x i32> @fshl_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> ret <8 x i32> %res } -declare <16 x i32> @llvm.vp.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) define <16 x i32> @fshr_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i32: ; CHECK: # %bb.0: @@ -502,7 +473,6 @@ define <16 x i32> @fshr_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 ret <16 x i32> %res } -declare <16 x i32> @llvm.vp.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) define <16 x i32> @fshl_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i32: ; CHECK: # %bb.0: @@ -520,7 +490,6 @@ define <16 x i32> @fshl_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 ret <16 x i32> %res } -declare <2 x i64> @llvm.vp.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) define <2 x i64> @fshr_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i64: ; CHECK: # %bb.0: @@ -538,7 +507,6 @@ define <2 x i64> @fshr_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> ret <2 x i64> %res } -declare <2 x i64> @llvm.vp.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) define <2 x i64> @fshl_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i64: ; CHECK: # %bb.0: @@ -556,7 +524,6 @@ define <2 x i64> @fshl_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> ret <2 x i64> %res } -declare <4 x i64> @llvm.vp.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) define <4 x i64> @fshr_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fshr_v4i64: ; CHECK: # %bb.0: @@ -574,7 +541,6 @@ define <4 x i64> @fshr_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> ret <4 x i64> %res } -declare <4 x i64> @llvm.vp.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) define <4 x i64> @fshl_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i64: ; CHECK: # %bb.0: @@ -592,7 +558,6 @@ define <4 x i64> @fshl_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> ret <4 x i64> %res } -declare <7 x i64> @llvm.vp.fshr.v7i64(<7 x i64>, <7 x i64>, <7 x i64>, <7 x i1>, i32) define <7 x i64> @fshr_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v7i64: ; CHECK: # %bb.0: @@ -610,7 +575,6 @@ define <7 x i64> @fshr_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> ret <7 x i64> %res } -declare <7 x i64> @llvm.vp.fshl.v7i64(<7 x i64>, <7 x i64>, <7 x i64>, <7 x i1>, i32) define <7 x i64> @fshl_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v7i64: ; CHECK: # %bb.0: @@ -628,7 +592,6 @@ define <7 x i64> @fshl_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> ret <7 x i64> %res } -declare <8 x i64> @llvm.vp.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) define <8 x i64> @fshr_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i64: ; CHECK: # %bb.0: @@ -646,7 +609,6 @@ define <8 x i64> @fshr_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> ret <8 x i64> %res } -declare <8 x i64> @llvm.vp.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) define <8 x i64> @fshl_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i64: ; CHECK: # %bb.0: @@ -664,7 +626,6 @@ define <8 x i64> @fshl_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> ret <8 x i64> %res } -declare <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64>, <16 x 
i64>, <16 x i64>, <16 x i1>, i32) define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i64: ; CHECK: # %bb.0: @@ -700,7 +661,6 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 ret <16 x i64> %res } -declare <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32) define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll index b7cf84fba4210..7813d7f309b6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll @@ -24,7 +24,6 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata) define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: trunc_v2f16: @@ -46,7 +45,6 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata) define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: trunc_v4f16: @@ -68,7 +66,6 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata) define <8 x half> @trunc_v8f16(<8 x half> %x) 
strictfp { ; CHECK-LABEL: trunc_v8f16: @@ -90,7 +87,6 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata) define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: trunc_v16f16: @@ -112,7 +108,6 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata) define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: trunc_v32f16: @@ -135,7 +130,6 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata) define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: trunc_v1f32: @@ -156,7 +150,6 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: trunc_v2f32: @@ -177,7 +170,6 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata) define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: trunc_v4f32: @@ -198,7 +190,6 @@ define <4 x float> 
@trunc_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: trunc_v8f32: @@ -219,7 +210,6 @@ define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata) define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: trunc_v16f32: @@ -240,7 +230,6 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata) define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: trunc_v1f64: @@ -278,7 +267,6 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: trunc_v2f64: @@ -316,7 +304,6 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: trunc_v4f64: @@ -354,7 +341,6 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { %a = 
call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: trunc_v8f64: @@ -392,4 +378,3 @@ define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll index a99efc97f7e63..8efb48a8cb691 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll @@ -13,7 +13,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v4i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v3 } -declare <4 x i32> @llvm.vp.load.v4i32(ptr, <4 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v4i32: ; CHECK: # %bb.0: @@ -26,7 +25,6 @@ define <4 x i32> @insert_subvector_vp_load_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 } ; Can't fold this in because the load has a non-poison passthru that isn't equal to the vmv.v.v passtrhu -declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>) define <4 x i32> @insert_subvector_load_unfoldable_passthru_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 x i1> %mask, <4 x i32> %passthru) { ; CHECK-LABEL: insert_subvector_load_unfoldable_passthru_v4i32_v4i32: ; CHECK: # %bb.0: @@ -65,7 +63,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v4i32(<4 x i32> %v1, <4 x i32> %v2) ret <4 x i32> %v4 } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) 
define <4 x i32> @insert_subvector_vp_add_v4i32_v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v4i32: ; CHECK: # %bb.0: @@ -91,7 +88,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v2i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v4 } -declare <2 x i32> @llvm.vp.load.v2i32(ptr, <2 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v2i32(<4 x i32> %v1, ptr %p, <2 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v2i32: ; CHECK: # %bb.0: @@ -121,7 +117,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %v2) ret <4 x i32> %v5 } -declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) define <4 x i32> @insert_subvector_vp_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %v2, <2 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v2i32: ; CHECK: # %bb.0: @@ -148,7 +143,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v8i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v4 } -declare <8 x i32> @llvm.vp.load.v8i32(ptr, <8 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v8i32(<4 x i32> %v1, ptr %p, <8 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v8i32: ; CHECK: # %bb.0: @@ -177,7 +171,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v8i32(<4 x i32> %v1, <8 x i32> %v2) ret <4 x i32> %v5 } -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) define <4 x i32> @insert_subvector_vp_add_v4i32_v8i32(<4 x i32> %v1, <8 x i32> %v2, <8 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v8i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll index 00328f9d33d3e..c0473eea56552 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -140,7 +140,6 @@ define @insert_nxv8i32_v4i32_0( %vec, <4 x ret %v } - define <4 x i32> 
@insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) { ; CHECK-LABEL: insert_v4i32_v4i32_0: ; CHECK: # %bb.0: @@ -252,7 +251,6 @@ bar: ret <4 x i32> %w } - define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) { ; VLA-LABEL: insert_v8i32_v2i32_0: ; VLA: # %bb.0: @@ -603,8 +601,6 @@ define @insert_nxv8i1_v8i1_16( %v, ptr %svp) ret %c } -declare @llvm.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) - define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) { ; VLA-LABEL: insert_v2i64_nxv16i64: ; VLA: # %bb.0: @@ -966,23 +962,6 @@ define @insert_nxv8f16_v2f16_2( %vec, ptr ret %v } -declare <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64) -declare <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64) - -declare <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64) - -declare <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64) -declare <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64) - -declare @llvm.vector.insert.v4i1.nxv2i1(, <4 x i1>, i64) -declare @llvm.vector.insert.v8i1.nxv8i1(, <8 x i1>, i64) - -declare @llvm.vector.insert.v2i16.nxv2i16(, <2 x i16>, i64) - -declare @llvm.vector.insert.v2i32.nxv8i32(, <2 x i32>, i64) -declare @llvm.vector.insert.v4i32.nxv8i32(, <4 x i32>, i64) -declare @llvm.vector.insert.v8i32.nxv8i32(, <8 x i32>, i64) - ; We emit insert_subvectors of fixed vectors at index 0 into undefs as a ; copy_to_regclass or insert_subreg, depending on the register classes of the ; vector types. 
Make sure that we use the correct type and not the shrunken @@ -991,7 +970,6 @@ declare @llvm.vector.insert.v8i32.nxv8i32(, ; ; t14: nxv2i32 = insert_subvector poison:nxv2i32, t4, Constant:i64<0> ; t15: v8i32 = extract_subvector t14, Constant:i64<0> -declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64) define <4 x i32> @insert_extract_v8i32_v2i32_0(<2 x i32> %v) { ; CHECK-LABEL: insert_extract_v8i32_v2i32_0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll index 9df71cfc96cc7..7cb00d40e60c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -1493,7 +1493,6 @@ define void @smin_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>) define void @smin_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: smin_vx_v8i16: @@ -1510,7 +1509,6 @@ define void @smin_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) define void @smin_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: smin_vx_v6i16: @@ -1527,7 +1525,6 @@ define void @smin_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.smin.v6i16(<6 x i16>, <6 x i16>) define void @smin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smin_vx_v4i32: @@ -1544,7 +1541,6 @@ define void @smin_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) define void @smin_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: smin_xv_v16i8: @@ -1710,7 +1706,6 @@ define void @smax_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>) define void @smax_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: smax_vx_v8i16: @@ -1727,7 +1722,6 @@ define void @smax_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void 
} -declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) define void @smax_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: smax_vx_v6i16: @@ -1744,7 +1738,6 @@ define void @smax_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.smax.v6i16(<6 x i16>, <6 x i16>) define void @smax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smax_vx_v4i32: @@ -1761,7 +1754,6 @@ define void @smax_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) define void @smax_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: smax_xv_v16i8: @@ -1927,7 +1919,6 @@ define void @umin_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>) define void @umin_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: umin_vx_v8i16: @@ -1944,7 +1935,6 @@ define void @umin_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) define void @umin_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: umin_vx_v6i16: @@ -1961,7 +1951,6 @@ define void @umin_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.umin.v6i16(<6 x i16>, <6 x i16>) define void @umin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umin_vx_v4i32: @@ -1978,7 +1967,6 @@ define void @umin_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) define void @umin_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: umin_xv_v16i8: @@ -2144,7 +2132,6 @@ define void @umax_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>) define void @umax_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: umax_vx_v8i16: @@ -2161,7 +2148,6 @@ define void @umax_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) define void @umax_vx_v6i16(ptr %x, i16 
%y) { ; CHECK-LABEL: umax_vx_v6i16: @@ -2178,7 +2164,6 @@ define void @umax_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.umax.v6i16(<6 x i16>, <6 x i16>) define void @umax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umax_vx_v4i32: @@ -2195,7 +2180,6 @@ define void @umax_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) define void @umax_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: umax_xv_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll index 8b6270e86af36..0abad3bf1c56c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x ptr> @inttoptr_v4p0_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_v4p0_v4i32: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <4 x ptr> @inttoptr_v4p0_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %e ret <4 x ptr> %v } -declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x ptr> @inttoptr_v4p0_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_v4p0_v4i64: ; CHECK: # %bb.0: @@ -24,8 +20,6 @@ define <4 x ptr> @inttoptr_v4p0_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %e ret <4 x ptr> %v } -declare <4 x i32> @llvm.vp.ptrtoint.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @ptrtoint_v4i32_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_v4i32_v4p0: ; CHECK: # %bb.0: @@ -37,8 +31,6 @@ define <4 x i32> @ptrtoint_v4i32_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %e ret <4 x i32> %v } -declare 
<4 x i64> @llvm.vp.ptrtoint.v4i64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i64> @ptrtoint_v4i64_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_v4i64_v4p0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll index 466fe744a1376..1282a6f9f8c6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll @@ -21,7 +21,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x, <1 x i1> %m, i32 zeroext %e %a = call <1 x i64> @llvm.vp.llrint.v1i64.v1f32(<1 x float> %x, <1 x i1> %m, i32 %evl) ret <1 x i64> %a } -declare <1 x i64> @llvm.vp.llrint.v1i64.v1f32(<1 x float>, <1 x i1>, i32) define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v2i64_v2f32: @@ -40,7 +39,6 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %e %a = call <2 x i64> @llvm.vp.llrint.v2i64.v2f32(<2 x float> %x, <2 x i1> %m, i32 %evl) ret <2 x i64> %a } -declare <2 x i64> @llvm.vp.llrint.v2i64.v2f32(<2 x float>, <2 x i1>, i32) define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v3i64_v3f32: @@ -59,7 +57,6 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %e %a = call <3 x i64> @llvm.vp.llrint.v3i64.v3f32(<3 x float> %x, <3 x i1> %m, i32 %evl) ret <3 x i64> %a } -declare <3 x i64> @llvm.vp.llrint.v3i64.v3f32(<3 x float>, <3 x i1>, i32) define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v4i64_v4f32: @@ -78,7 +75,6 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %e %a = call <4 x i64> @llvm.vp.llrint.v4i64.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl) ret <4 x i64> %a } -declare <4 x i64> @llvm.vp.llrint.v4i64.v4f32(<4 x float>, <4 x i1>, i32) define <8 x i64> 
@llrint_v8i64_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v8i64_v8f32: @@ -97,7 +93,6 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %e %a = call <8 x i64> @llvm.vp.llrint.v8i64.v8f32(<8 x float> %x, <8 x i1> %m, i32 %evl) ret <8 x i64> %a } -declare <8 x i64> @llvm.vp.llrint.v8i64.v8f32(<8 x float>, <8 x i1>, i32) define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v16i64_v16f32: @@ -116,7 +111,6 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroe %a = call <16 x i64> @llvm.vp.llrint.v16i64.v16f32(<16 x float> %x, <16 x i1> %m, i32 %evl) ret <16 x i64> %a } -declare <16 x i64> @llvm.vp.llrint.v16i64.v16f32(<16 x float>, <16 x i1>, i32) define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v1i64_v1f64: @@ -133,7 +127,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext % %a = call <1 x i64> @llvm.vp.llrint.v1i64.v1f64(<1 x double> %x, <1 x i1> %m, i32 %evl) ret <1 x i64> %a } -declare <1 x i64> @llvm.vp.llrint.v1i64.v1f64(<1 x double>, <1 x i1>, i32) define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v2i64_v2f64: @@ -150,7 +143,6 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext % %a = call <2 x i64> @llvm.vp.llrint.v2i64.v2f64(<2 x double> %x, <2 x i1> %m, i32 %evl) ret <2 x i64> %a } -declare <2 x i64> @llvm.vp.llrint.v2i64.v2f64(<2 x double>, <2 x i1>, i32) define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v4i64_v4f64: @@ -167,7 +159,6 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext % %a = call <4 x i64> @llvm.vp.llrint.v4i64.v4f64(<4 x double> %x, <4 x i1> %m, i32 %evl) ret <4 x i64> %a } -declare <4 x i64> @llvm.vp.llrint.v4i64.v4f64(<4 x 
double>, <4 x i1>, i32) define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v8i64_v8f64: @@ -184,4 +175,3 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext % %a = call <8 x i64> @llvm.vp.llrint.v8i64.v8f64(<8 x double> %x, <8 x i1> %m, i32 %evl) ret <8 x i64> %a } -declare <8 x i64> @llvm.vp.llrint.v8i64.v8f64(<8 x double>, <8 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll index b9a84ff9b07b9..d7f971baf34b8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll @@ -14,7 +14,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>) define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) { ; CHECK-LABEL: llrint_v2i64_v2f32: @@ -26,7 +25,6 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>) define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) { ; CHECK-LABEL: llrint_v3i64_v3f32: @@ -38,7 +36,6 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float>) define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) { ; CHECK-LABEL: llrint_v4i64_v4f32: @@ -50,7 +47,6 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>) define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) { ; CHECK-LABEL: llrint_v8i64_v8f32: @@ -62,7 +58,6 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) { %a = call <8 x i64> 
@llvm.llrint.v8i64.v8f32(<8 x float> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>) define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) { ; CHECK-LABEL: llrint_v16i64_v16f32: @@ -74,7 +69,6 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>) define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) { ; CHECK-LABEL: llrint_v1i64_v1f64: @@ -85,7 +79,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>) define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) { ; CHECK-LABEL: llrint_v2i64_v2f64: @@ -96,7 +89,6 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>) define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) { ; CHECK-LABEL: llrint_v4i64_v4f64: @@ -107,7 +99,6 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>) define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) { ; CHECK-LABEL: llrint_v8i64_v8f64: @@ -118,7 +109,6 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>) define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) { ; CHECK-LABEL: llrint_v1i64_v1f16: @@ -131,7 +121,6 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>) define <2 x i64> @llrint_v2i64_v2f16(<2 x 
half> %x) { ; CHECK-LABEL: llrint_v2i64_v2f16: @@ -144,7 +133,6 @@ define <2 x i64> @llrint_v2i64_v2f16(<2 x half> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>) define <3 x i64> @llrint_v3i64_v3f16(<3 x half> %x) { ; CHECK-LABEL: llrint_v3i64_v3f16: @@ -157,7 +145,6 @@ define <3 x i64> @llrint_v3i64_v3f16(<3 x half> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half>) define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) { ; CHECK-LABEL: llrint_v4i64_v4f16: @@ -170,7 +157,6 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>) define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) { ; CHECK-LABEL: llrint_v8i64_v8f16: @@ -183,7 +169,6 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>) define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) { ; CHECK-LABEL: llrint_v16i64_v16f16: @@ -196,7 +181,6 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>) define <1 x i64> @llrint_v1i64_v1bf16(<1 x bfloat> %x) { ; CHECK-LABEL: llrint_v1i64_v1bf16: @@ -209,7 +193,6 @@ define <1 x i64> @llrint_v1i64_v1bf16(<1 x bfloat> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat>) define <2 x i64> @llrint_v2i64_v2bf16(<2 x bfloat> %x) { ; CHECK-LABEL: llrint_v2i64_v2bf16: @@ -222,7 +205,6 @@ define <2 x i64> @llrint_v2i64_v2bf16(<2 x bfloat> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x 
bfloat> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat>) define <3 x i64> @llrint_v3i64_v3bf16(<3 x bfloat> %x) { ; CHECK-LABEL: llrint_v3i64_v3bf16: @@ -235,7 +217,6 @@ define <3 x i64> @llrint_v3i64_v3bf16(<3 x bfloat> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat>) define <4 x i64> @llrint_v4i64_v4bf16(<4 x bfloat> %x) { ; CHECK-LABEL: llrint_v4i64_v4bf16: @@ -248,7 +229,6 @@ define <4 x i64> @llrint_v4i64_v4bf16(<4 x bfloat> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat>) define <8 x i64> @llrint_v8i64_v8bf16(<8 x bfloat> %x) { ; CHECK-LABEL: llrint_v8i64_v8bf16: @@ -261,7 +241,6 @@ define <8 x i64> @llrint_v8i64_v8bf16(<8 x bfloat> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat>) define <16 x i64> @llrint_v16i64_v16bf16(<16 x bfloat> %x) { ; CHECK-LABEL: llrint_v16i64_v16bf16: @@ -274,4 +253,3 @@ define <16 x i64> @llrint_v16i64_v16bf16(<16 x bfloat> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll index 5751759ddd9cb..9de58469479ff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll @@ -27,7 +27,6 @@ define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>) define <2 x i64> @llround_v2f16(<2 x half> %x) nounwind { ; RV32-LABEL: llround_v2f16: @@ -52,7 +51,6 @@ define <2 x i64> @llround_v2f16(<2 x half> %x) 
nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>) define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind { ; RV32-LABEL: llround_v3f16: @@ -77,7 +75,6 @@ define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind { %a = call <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>) define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind { ; RV32-LABEL: llround_v4f16: @@ -102,7 +99,6 @@ define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>) define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind { ; RV32-LABEL: llround_v8f16: @@ -127,7 +123,6 @@ define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>) define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind { ; RV32-LABEL: llround_v16f16: @@ -152,7 +147,6 @@ define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind { %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>) define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind { ; RV32-LABEL: llround_v1i64_v1f32: @@ -175,7 +169,6 @@ define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>) define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind { ; RV32-LABEL: llround_v2i64_v2f32: @@ -198,7 +191,6 @@ define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x) ret <2 x i64> %a } -declare <2 x i64> 
@llvm.llround.v2i64.v2f32(<2 x float>) define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind { ; RV32-LABEL: llround_v3i64_v3f32: @@ -221,7 +213,6 @@ define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind { %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>) define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind { ; RV32-LABEL: llround_v4i64_v4f32: @@ -244,7 +235,6 @@ define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>) define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind { ; RV32-LABEL: llround_v8i64_v8f32: @@ -267,7 +257,6 @@ define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>) define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind { ; RV32-LABEL: llround_v16i64_v16f32: @@ -290,7 +279,6 @@ define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind { %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>) define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind { ; RV32-LABEL: llround_v1i64_v1f64: @@ -311,7 +299,6 @@ define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>) define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind { ; RV32-LABEL: llround_v2i64_v2f64: @@ -332,7 +319,6 @@ define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 
x double>) define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind { ; RV32-LABEL: llround_v4i64_v4f64: @@ -353,7 +339,6 @@ define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>) define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind { ; RV32-LABEL: llround_v8i64_v8f64: @@ -374,4 +359,3 @@ define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll index 5b5163c17a5c9..613e3e1618732 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll @@ -28,7 +28,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x, <1 x i1> %m, i32 zeroext %evl) { %a = call <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f32(<1 x float> %x, <1 x i1> %m, i32 %evl) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f32(<1 x float>, <1 x i1>, i32) define <2 x iXLen> @lrint_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v2f32: @@ -52,7 +51,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { %a = call <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f32(<2 x float> %x, <2 x i1> %m, i32 %evl) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f32(<2 x float>, <2 x i1>, i32) define <3 x iXLen> @lrint_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v3f32: @@ -76,7 +74,6 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { %a = call <3 x iXLen> @llvm.vp.lrint.v3iXLen.v3f32(<3 x float> %x, <3 x i1> %m, i32 %evl) ret <3 x iXLen> %a } -declare <3 x iXLen> 
@llvm.vp.lrint.v3iXLen.v3f32(<3 x float>, <3 x i1>, i32) define <4 x iXLen> @lrint_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v4f32: @@ -100,7 +97,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { %a = call <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f32(<4 x float>, <4 x i1>, i32) define <8 x iXLen> @lrint_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v8f32: @@ -124,7 +120,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { %a = call <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f32(<8 x float> %x, <8 x i1> %m, i32 %evl) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f32(<8 x float>, <8 x i1>, i32) define <16 x iXLen> @lrint_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v16f32: @@ -148,7 +143,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %ev %a = call <16 x iXLen> @llvm.vp.lrint.v16iXLen.v16f32(<16 x float> %x, <16 x i1> %m, i32 %evl) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.vp.lrint.v16iXLen.v16f32(<16 x float>, <16 x i1>, i32) define <1 x iXLen> @lrint_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v1f64: @@ -173,7 +167,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) %a = call <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f64(<1 x double> %x, <1 x i1> %m, i32 %evl) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f64(<1 x double>, <1 x i1>, i32) define <2 x iXLen> @lrint_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v2f64: @@ -198,7 +191,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) %a = call <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f64(<2 x double> %x, <2 x i1> %m, i32 %evl) ret <2 x iXLen> %a } -declare <2 
x iXLen> @llvm.vp.lrint.v2iXLen.v2f64(<2 x double>, <2 x i1>, i32) define <4 x iXLen> @lrint_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v4f64: @@ -223,7 +215,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) %a = call <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f64(<4 x double> %x, <4 x i1> %m, i32 %evl) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f64(<4 x double>, <4 x i1>, i32) define <8 x iXLen> @lrint_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v8f64: @@ -248,4 +239,3 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) %a = call <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f64(<8 x double> %x, <8 x i1> %m, i32 %evl) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f64(<8 x double>, <8 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll index a52290072c540..330e9468f1ab6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll @@ -28,7 +28,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { ; RV32-LABEL: lrint_v2f32: @@ -52,7 +51,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>) define <3 x iXLen> @lrint_v3f32(<3 x float> %x) { ; RV32-LABEL: lrint_v3f32: @@ -76,7 +74,6 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float>) define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { ; RV32-LABEL: 
lrint_v4f32: @@ -100,7 +97,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { ; RV32-LABEL: lrint_v8f32: @@ -124,7 +120,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { ; RV32-LABEL: lrint_v16f32: @@ -148,7 +143,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { ; RV32-LABEL: lrint_v1f64: @@ -173,7 +167,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { ; RV32-LABEL: lrint_v2f64: @@ -198,7 +191,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { ; RV32-LABEL: lrint_v4f64: @@ -223,7 +215,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { ; RV32-LABEL: lrint_v8f64: @@ -248,7 +239,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 
x double>) define <1 x iXLen> @lrint_v1f16(<1 x half> %x) { ; RV32-LABEL: lrint_v1f16: @@ -277,7 +267,6 @@ define <1 x iXLen> @lrint_v1f16(<1 x half> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>) define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { ; RV32-LABEL: lrint_v2f16: @@ -306,7 +295,6 @@ define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>) define <3 x iXLen> @lrint_v3f16(<3 x half> %x) { ; RV32-LABEL: lrint_v3f16: @@ -335,7 +323,6 @@ define <3 x iXLen> @lrint_v3f16(<3 x half> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half>) define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { ; RV32-LABEL: lrint_v4f16: @@ -364,7 +351,6 @@ define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>) define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { ; RV32-LABEL: lrint_v8f16: @@ -393,7 +379,6 @@ define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>) define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { ; RV32-LABEL: lrint_v16f16: @@ -422,7 +407,6 @@ define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>) define <1 x iXLen> @lrint_v1bf16(<1 x bfloat> %x) { ; RV32-LABEL: lrint_v1bf16: @@ -451,7 +435,6 @@ define <1 x iXLen> @lrint_v1bf16(<1 x bfloat> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat> %x) ret <1 x iXLen> %a } 
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat>) define <2 x iXLen> @lrint_v2bf16(<2 x bfloat> %x) { ; RV32-LABEL: lrint_v2bf16: @@ -480,7 +463,6 @@ define <2 x iXLen> @lrint_v2bf16(<2 x bfloat> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat>) define <3 x iXLen> @lrint_v3bf16(<3 x bfloat> %x) { ; RV32-LABEL: lrint_v3bf16: @@ -509,7 +491,6 @@ define <3 x iXLen> @lrint_v3bf16(<3 x bfloat> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat>) define <4 x iXLen> @lrint_v4bf16(<4 x bfloat> %x) { ; RV32-LABEL: lrint_v4bf16: @@ -538,7 +519,6 @@ define <4 x iXLen> @lrint_v4bf16(<4 x bfloat> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat>) define <8 x iXLen> @lrint_v8bf16(<8 x bfloat> %x) { ; RV32-LABEL: lrint_v8bf16: @@ -567,7 +547,6 @@ define <8 x iXLen> @lrint_v8bf16(<8 x bfloat> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat>) define <16 x iXLen> @lrint_v16bf16(<16 x bfloat> %x) { ; RV32-LABEL: lrint_v16bf16: @@ -596,7 +575,6 @@ define <16 x iXLen> @lrint_v16bf16(<16 x bfloat> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat>) define <32 x iXLen> @lrint_v32bf16(<32 x bfloat> %x) { ; RV32-LABEL: lrint_v32bf16: @@ -633,4 +611,3 @@ define <32 x iXLen> @lrint_v32bf16(<32 x bfloat> %x) { %a = call <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat> %x) ret <32 x iXLen> %a } -declare <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll index 64b3b7912ed32..d1ef02665016a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll @@ -39,7 +39,6 @@ define <1 x iXLen> @lround_v1f16(<1 x half> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>) define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind { ; RV32-LABEL: lround_v2f16: @@ -74,7 +73,6 @@ define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>) define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind { ; RV32-LABEL: lround_v3f16: @@ -109,7 +107,6 @@ define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind { %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>) define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind { ; RV32-LABEL: lround_v4f16: @@ -144,7 +141,6 @@ define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>) define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind { ; RV32-LABEL: lround_v8f16: @@ -179,7 +175,6 @@ define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>) define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind { ; RV32-LABEL: lround_v16f16: @@ -214,7 +209,6 @@ define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind { %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x 
half>) define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind { ; RV32-LABEL: lround_v1f32: @@ -244,7 +238,6 @@ define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind { ; RV32-LABEL: lround_v2f32: @@ -274,7 +267,6 @@ define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>) define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind { ; RV32-LABEL: lround_v3f32: @@ -304,7 +296,6 @@ define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind { %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>) define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind { ; RV32-LABEL: lround_v4f32: @@ -334,7 +325,6 @@ define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind { ; RV32-LABEL: lround_v8f32: @@ -364,7 +354,6 @@ define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind { ; RV32-LABEL: lround_v16f32: @@ -394,7 +383,6 @@ define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind { %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind { ; RV32-LABEL: 
lround_v1f64: @@ -425,7 +413,6 @@ define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind { ; RV32-LABEL: lround_v2f64: @@ -456,7 +443,6 @@ define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind { ; RV32-LABEL: lround_v4f64: @@ -487,7 +473,6 @@ define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind { ; RV32-LABEL: lround_v8f64: @@ -518,7 +503,6 @@ define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>) define <32 x iXLen> @lround_v32bf16(<32 x bfloat> %x) { ; RV32-LABEL: lround_v32bf16: @@ -561,4 +545,3 @@ define <32 x iXLen> @lround_v32bf16(<32 x bfloat> %x) { %a = call <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat> %x) ret <32 x iXLen> %a } -declare <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll index 38e78de3575b3..edefb8e1a8ae4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare <1 x i1> @llvm.vp.and.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 
x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.and.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.and.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.and.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.and.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v16i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare <1 x i1> @llvm.vp.or.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.or.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; 
CHECK-LABEL: or_v2i1: ; CHECK: # %bb.0: @@ -88,8 +74,6 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.or.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: @@ -100,8 +84,6 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.or.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: @@ -112,8 +94,6 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.or.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: @@ -124,8 +104,6 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext ret <16 x i1> %v } -declare <1 x i1> @llvm.vp.xor.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: @@ -136,8 +114,6 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.xor.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: @@ -148,8 +124,6 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.xor.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: @@ -160,8 +134,6 @@ define <4 x i1> 
@xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.xor.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: @@ -172,8 +144,6 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.xor.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll index 7e6f2c76e5881..f3cea49ce7946 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -17,8 +17,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+zve32f,+zvl128b -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F,RV64ZVE32F-ZVFHMIN -declare <1 x i8> @llvm.masked.gather.v1i8.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i8>) - define <1 x i8> @mgather_v1i8(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i8> %passthru) { ; RV32V-LABEL: mgather_v1i8: ; RV32V: # %bb.0: @@ -55,8 +53,6 @@ define <1 x i8> @mgather_v1i8(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i8> %passthru) ret <1 x i8> %v } -declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>) - define <2 x i8> @mgather_v2i8(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32V-LABEL: mgather_v2i8: ; RV32V: # %bb.0: @@ -449,8 +445,6 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x ret <2 x i64> %ev } -declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>) - define <4 x i8> @mgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i8> %passthru) { ; 
RV32-LABEL: mgather_v4i8: ; RV32: # %bb.0: @@ -573,8 +567,6 @@ define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) { ret <4 x i8> %v } -declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>) - define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru) { ; RV32-LABEL: mgather_v8i8: ; RV32: # %bb.0: @@ -810,8 +802,6 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ret <8 x i8> %v } -declare <1 x i16> @llvm.masked.gather.v1i16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i16>) - define <1 x i16> @mgather_v1i16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i16> %passthru) { ; RV32V-LABEL: mgather_v1i16: ; RV32V: # %bb.0: @@ -848,8 +838,6 @@ define <1 x i16> @mgather_v1i16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i16> %passthr ret <1 x i16> %v } -declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>) - define <2 x i16> @mgather_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32V-LABEL: mgather_v2i16: ; RV32V: # %bb.0: @@ -1138,8 +1126,6 @@ define <2 x i64> @mgather_v2i16_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 ret <2 x i64> %ev } -declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>) - define <4 x i16> @mgather_v4i16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i16> %passthru) { ; RV32-LABEL: mgather_v4i16: ; RV32: # %bb.0: @@ -1262,8 +1248,6 @@ define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) ret <4 x i16> %v } -declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>) - define <8 x i16> @mgather_v8i16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_v8i16: ; RV32: # %bb.0: @@ -1938,8 +1922,6 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ret <8 x i16> %v } -declare <1 x i32> @llvm.masked.gather.v1i32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i32>) - define <1 x i32> @mgather_v1i32(<1 x ptr> %ptrs, 
<1 x i1> %m, <1 x i32> %passthru) { ; RV32V-LABEL: mgather_v1i32: ; RV32V: # %bb.0: @@ -1976,8 +1958,6 @@ define <1 x i32> @mgather_v1i32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i32> %passthr ret <1 x i32> %v } -declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>) - define <2 x i32> @mgather_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i32> %passthru) { ; RV32V-LABEL: mgather_v2i32: ; RV32V: # %bb.0: @@ -2154,8 +2134,6 @@ define <2 x i64> @mgather_v2i32_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 ret <2 x i64> %ev } -declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>) - define <4 x i32> @mgather_v4i32(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i32> %passthru) { ; RV32-LABEL: mgather_v4i32: ; RV32: # %bb.0: @@ -2277,8 +2255,6 @@ define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) ret <4 x i32> %v } -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>) - define <8 x i32> @mgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i32> %passthru) { ; RV32-LABEL: mgather_v8i32: ; RV32: # %bb.0: @@ -3391,8 +3367,6 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ret <8 x i32> %v } -declare <1 x i64> @llvm.masked.gather.v1i64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i64>) - define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthru) { ; RV32V-LABEL: mgather_v1i64: ; RV32V: # %bb.0: @@ -3435,8 +3409,6 @@ define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthr ret <1 x i64> %v } -declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>) - define <2 x i64> @mgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32V-LABEL: mgather_v2i64: ; RV32V: # %bb.0: @@ -3508,8 +3480,6 @@ define <2 x i64> @mgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthr ret <2 x i64> %v } -declare <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 
x i64>) - define <4 x i64> @mgather_v4i64(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i64> %passthru) { ; RV32V-LABEL: mgather_v4i64: ; RV32V: # %bb.0: @@ -3748,8 +3718,6 @@ define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) ret <4 x i64> %v } -declare <8 x i64> @llvm.masked.gather.v8i64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i64>) - define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_v8i64: ; RV32V: # %bb.0: @@ -6827,8 +6795,6 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m, ret <8 x i64> %v } -declare <1 x bfloat> @llvm.masked.gather.v1bf16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x bfloat>) - define <1 x bfloat> @mgather_v1bf16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x bfloat> %passthru) { ; RV32V-LABEL: mgather_v1bf16: ; RV32V: # %bb.0: @@ -6865,8 +6831,6 @@ define <1 x bfloat> @mgather_v1bf16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x bfloat> % ret <1 x bfloat> %v } -declare <2 x bfloat> @llvm.masked.gather.v2bf16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x bfloat>) - define <2 x bfloat> @mgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x bfloat> %passthru) { ; RV32V-LABEL: mgather_v2bf16: ; RV32V: # %bb.0: @@ -6917,8 +6881,6 @@ define <2 x bfloat> @mgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x bfloat> % ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.masked.gather.v4bf16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x bfloat>) - define <4 x bfloat> @mgather_v4bf16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x bfloat> %passthru) { ; RV32-LABEL: mgather_v4bf16: ; RV32: # %bb.0: @@ -7041,8 +7003,6 @@ define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pas ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x bfloat>) - define <8 x bfloat> @mgather_v8bf16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x bfloat> %passthru) { ; RV32-LABEL: mgather_v8bf16: ; RV32: # %bb.0: @@ -7717,8 +7677,6 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr 
%base, <8 x i16> %idxs, <8 x i1> ret <8 x bfloat> %v } -declare <1 x half> @llvm.masked.gather.v1f16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x half>) - define <1 x half> @mgather_v1f16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x half> %passthru) { ; RV32V-LABEL: mgather_v1f16: ; RV32V: # %bb.0: @@ -7755,8 +7713,6 @@ define <1 x half> @mgather_v1f16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x half> %passt ret <1 x half> %v } -declare <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x half>) - define <2 x half> @mgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x half> %passthru) { ; RV32V-LABEL: mgather_v2f16: ; RV32V: # %bb.0: @@ -7832,8 +7788,6 @@ define <2 x half> @mgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x half> %passt ret <2 x half> %v } -declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>) - define <4 x half> @mgather_v4f16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x half> %passthru) { ; RV32-LABEL: mgather_v4f16: ; RV32: # %bb.0: @@ -8022,8 +7976,6 @@ define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru ret <4 x half> %v } -declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>) - define <8 x half> @mgather_v8f16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_v8f16: ; RV32: # %bb.0: @@ -9256,8 +9208,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ret <8 x half> %v } -declare <1 x float> @llvm.masked.gather.v1f32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x float>) - define <1 x float> @mgather_v1f32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x float> %passthru) { ; RV32V-LABEL: mgather_v1f32: ; RV32V: # %bb.0: @@ -9294,8 +9244,6 @@ define <1 x float> @mgather_v1f32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x float> %pas ret <1 x float> %v } -declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>) - define <2 x float> @mgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x float> %passthru) { ; RV32V-LABEL: 
mgather_v2f32: ; RV32V: # %bb.0: @@ -9346,8 +9294,6 @@ define <2 x float> @mgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x float> %pas ret <2 x float> %v } -declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>) - define <4 x float> @mgather_v4f32(<4 x ptr> %ptrs, <4 x i1> %m, <4 x float> %passthru) { ; RV32-LABEL: mgather_v4f32: ; RV32: # %bb.0: @@ -9469,8 +9415,6 @@ define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passth ret <4 x float> %v } -declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>) - define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %passthru) { ; RV32-LABEL: mgather_v8f32: ; RV32: # %bb.0: @@ -10583,8 +10527,6 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ret <8 x float> %v } -declare <1 x double> @llvm.masked.gather.v1f64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x double>) - define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %passthru) { ; RV32V-LABEL: mgather_v1f64: ; RV32V: # %bb.0: @@ -10625,8 +10567,6 @@ define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %p ret <1 x double> %v } -declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x double>) - define <2 x double> @mgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x double> %passthru) { ; RV32V-LABEL: mgather_v2f64: ; RV32V: # %bb.0: @@ -10688,8 +10628,6 @@ define <2 x double> @mgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x double> %p ret <2 x double> %v } -declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x double>) - define <4 x double> @mgather_v4f64(<4 x ptr> %ptrs, <4 x i1> %m, <4 x double> %passthru) { ; RV32V-LABEL: mgather_v4f64: ; RV32V: # %bb.0: @@ -10882,8 +10820,6 @@ define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %pass ret <4 x double> %v } -declare <8 x double> 
@llvm.masked.gather.v8f64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x double>) - define <8 x double> @mgather_v8f64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_v8f64: ; RV32V: # %bb.0: @@ -13247,8 +13183,6 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1> ret <8 x double> %v } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>) - define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m, <16 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v16i8: ; RV32: # %bb.0: @@ -13470,8 +13404,6 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m ret <16 x i8> %v } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>) - define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m, <32 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v32i8: ; RV32: # %bb.0: @@ -13904,7 +13836,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m ret <32 x i8> %v } - define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) { ; CHECK-LABEL: mgather_broadcast_load_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll index e86fae6d501e5..c5e874a6f8f91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -17,8 +17,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+zve32f,+zvl128b -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F,RV64ZVE32F-ZVFHMIN -declare void @llvm.masked.scatter.v1i8.v1p0(<1 x i8>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i8(<1 x i8> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i8: ; RV32V: # %bb.0: @@ -52,8 +50,6 @@ define 
void @mscatter_v1i8(<1 x i8> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i8(<2 x i8> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i8: ; RV32V: # %bb.0: @@ -267,8 +263,6 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i8: ; RV32: # %bb.0: @@ -369,8 +363,6 @@ define void @mscatter_falsemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i8(<8 x i8> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i8: ; RV32: # %bb.0: @@ -586,8 +578,6 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8 ret void } -declare void @llvm.masked.scatter.v1i16.v1p0(<1 x i16>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i16(<1 x i16> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i16: ; RV32V: # %bb.0: @@ -621,8 +611,6 @@ define void @mscatter_v1i16(<1 x i16> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i16(<2 x i16> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i16: ; RV32V: # %bb.0: @@ -778,8 +766,6 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i16: ; RV32: # %bb.0: @@ -880,8 +866,6 @@ define void @mscatter_falsemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) { ret void } -declare void 
@llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i16(<8 x i16> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i16: ; RV32: # %bb.0: @@ -1491,8 +1475,6 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.masked.scatter.v1i32.v1p0(<1 x i32>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i32(<1 x i32> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i32: ; RV32V: # %bb.0: @@ -1526,8 +1508,6 @@ define void @mscatter_v1i32(<1 x i32> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i32(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i32: ; RV32V: # %bb.0: @@ -1627,8 +1607,6 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i32: ; RV32: # %bb.0: @@ -1729,8 +1707,6 @@ define void @mscatter_falsemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i32(<8 x i32> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i32: ; RV32: # %bb.0: @@ -2765,8 +2741,6 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs, ret void } -declare void @llvm.masked.scatter.v1i64.v1p0(<1 x i64>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i64: ; RV32V: # %bb.0: @@ -2806,8 +2780,6 @@ define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>) - define 
void @mscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64: ; RV32V: # %bb.0: @@ -2873,8 +2845,6 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4i64: ; RV32V: # %bb.0: @@ -3056,8 +3026,6 @@ define void @mscatter_falsemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8i64: ; RV32V: # %bb.0: @@ -5868,8 +5836,6 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs, ret void } -declare void @llvm.masked.scatter.v1bf16.v1p0(<1 x bfloat>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1bf16(<1 x bfloat> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1bf16: ; RV32V: # %bb.0: @@ -5905,8 +5871,6 @@ define void @mscatter_v1bf16(<1 x bfloat> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2bf16.v2p0(<2 x bfloat>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2bf16: ; RV32V: # %bb.0: @@ -5955,8 +5919,6 @@ define void @mscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4bf16.v4p0(<4 x bfloat>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4bf16: ; RV32: # %bb.0: @@ -6073,8 +6035,6 @@ define void @mscatter_falsemask_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8bf16.v8p0(<8 x bfloat>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8bf16(<8 x bfloat> %val, <8 x 
ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8bf16: ; RV32: # %bb.0: @@ -6766,8 +6726,6 @@ define void @mscatter_baseidx_v8bf16(<8 x bfloat> %val, ptr %base, <8 x i16> %id ret void } -declare void @llvm.masked.scatter.v1f16.v1p0(<1 x half>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f16(<1 x half> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f16: ; RV32V: # %bb.0: @@ -6814,8 +6772,6 @@ define void @mscatter_v1f16(<1 x half> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f16: ; RV32V: # %bb.0: @@ -6886,8 +6842,6 @@ define void @mscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f16(<4 x half> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f16: ; RV32: # %bb.0: @@ -7064,8 +7018,6 @@ define void @mscatter_falsemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f16(<8 x half> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f16: ; RV32: # %bb.0: @@ -8260,8 +8212,6 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.masked.scatter.v1f32.v1p0(<1 x float>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f32(<1 x float> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f32: ; RV32V: # %bb.0: @@ -8295,8 +8245,6 @@ define void @mscatter_v1f32(<1 x float> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f32: ; 
RV32V: # %bb.0: @@ -8341,8 +8289,6 @@ define void @mscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f32(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f32: ; RV32: # %bb.0: @@ -8443,8 +8389,6 @@ define void @mscatter_falsemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f32: ; RV32: # %bb.0: @@ -9479,8 +9423,6 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs ret void } -declare void @llvm.masked.scatter.v1f64.v1p0(<1 x double>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f64: ; RV32V: # %bb.0: @@ -9519,8 +9461,6 @@ define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f64: ; RV32V: # %bb.0: @@ -9580,8 +9520,6 @@ define void @mscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f64(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4f64: ; RV32V: # %bb.0: @@ -9731,8 +9669,6 @@ define void @mscatter_falsemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f64(<8 x double> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8f64: ; RV32V: # %bb.0: @@ -11925,8 +11861,6 @@ 
define void @mscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %idx ret void } -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>) - define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs, <16 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v16i8: ; RV32: # %bb.0: @@ -12128,8 +12062,6 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs, ret void } -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32, <32 x i1>) - define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs, <32 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v32i8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll index 08da7d6bc50f7..57c94830fc606 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f16: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) ret <2 x half> %v } -declare <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f16: ; CHECK: # %bb.0: @@ -92,8 +88,6 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_nearbyint_v8f16: ; CHECK: # %bb.0: @@ -136,8 +130,6 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) ret <8 x half> %v } -declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f16: ; CHECK: # %bb.0: @@ -182,8 +174,6 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e ret <16 x half> %v } -declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f32: ; CHECK: # %bb.0: @@ -224,8 +214,6 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f32: ; CHECK: # %bb.0: @@ -266,8 +254,6 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev ret <4 x float> %v } -declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f32: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev ret <8 x float> %v } -declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f32: ; CHECK: # %bb.0: @@ -354,8 +338,6 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext ret <16 x float> %v } -declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_nearbyint_v2f64(<2 x 
double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v2f64: ; RV32: # %bb.0: @@ -430,8 +412,6 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext % ret <2 x double> %v } -declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v4f64: ; RV32: # %bb.0: @@ -510,8 +490,6 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext % ret <4 x double> %v } -declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v8f64: ; RV32: # %bb.0: @@ -590,8 +568,6 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext % ret <8 x double> %v } -declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v15f64: ; RV32: # %bb.0: @@ -670,8 +646,6 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex ret <15 x double> %v } -declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v16f64: ; RV32: # %bb.0: @@ -750,8 +724,6 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex ret <16 x double> %v } -declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll index 
016be04ffc9b9..287752759a06b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll @@ -1,13 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvl256b | FileCheck %s -declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - ; Test binary operator with vp.merge and vp.smax. -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd: ; CHECK: # %bb.0: @@ -20,7 +14,6 @@ define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, } ; Test glued node of merge should not be deleted. -declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: @@ -48,7 +41,6 @@ define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y } ; Test float binary operator with vp.merge and vp.fadd. -declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfadd: ; CHECK: # %bb.0: @@ -61,7 +53,6 @@ define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x f } ; Test conversion by fptosi. 
-declare <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float>, <8 x i1>, i32) define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptosi: ; CHECK: # %bb.0: @@ -74,7 +65,6 @@ define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> } ; Test conversion by sitofp. -declare <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpsitofp: ; CHECK: # %bb.0: @@ -87,7 +77,6 @@ define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i } ; Test integer extension by vp.zext. -declare <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpzext: ; CHECK: # %bb.0: @@ -100,7 +89,6 @@ define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, } ; Test integer truncation by vp.trunc. -declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: @@ -113,7 +101,6 @@ define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m } ; Test integer extension by vp.fpext. -declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32) define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfpext: ; CHECK: # %bb.0: @@ -126,7 +113,6 @@ define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 } ; Test integer truncation by vp.trunc. 
-declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptrunc: ; CHECK: # %bb.0: @@ -139,7 +125,6 @@ define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 } ; Test load operation by vp.load. -declare <8 x i32> @llvm.vp.load.v8i32.p0(ptr, <8 x i1>, i32) define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, ptr %p, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload: @@ -167,11 +152,6 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, ptr %p, <8 x i32> %x, <8 ret <8 x i32> %b } -declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - ; Test binary operator with vp.select and vp.add. 
define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll index 754941eb93e01..4da6e103603ce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll @@ -324,7 +324,6 @@ define i32 @reduce_sum_16xi32_prefix13(ptr %p) { ret i32 %add11 } - define i32 @reduce_sum_16xi32_prefix14(ptr %p) { ; CHECK-LABEL: reduce_sum_16xi32_prefix14: ; CHECK: # %bb.0: @@ -586,11 +585,6 @@ define i32 @reduce_or_16xi32_prefix5(ptr %p) { ret i32 %or3 } -declare i32 @llvm.smax.i32(i32 %a, i32 %b) -declare i32 @llvm.smin.i32(i32 %a, i32 %b) -declare i32 @llvm.umax.i32(i32 %a, i32 %b) -declare i32 @llvm.umin.i32(i32 %a, i32 %b) - define i32 @reduce_smax_16xi32_prefix2(ptr %p) { ; CHECK-LABEL: reduce_smax_16xi32_prefix2: ; CHECK: # %bb.0: @@ -849,7 +843,6 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) { ret float %fadd0 } - ; Negative test - last fadd is not associative define float @reduce_fadd_4xi32_non_associative(ptr %p) { ; CHECK-LABEL: reduce_fadd_4xi32_non_associative: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll index 0f5cccd8cf2e2..ca9b24e60e503 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vp.reduce.fadd.v2f16(half, <2 x half>, <2 x i1>, i32) - define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f16: ; CHECK: # %bb.0: @@ -32,8 +30,6 @@ define half 
@vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 ze ret half %r } -declare half @llvm.vp.reduce.fadd.v4f16(half, <4 x half>, <4 x i1>, i32) - define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f16: ; CHECK: # %bb.0: @@ -60,8 +56,6 @@ define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 ze ret half %r } -declare float @llvm.vp.reduce.fadd.v2f32(float, <2 x float>, <2 x i1>, i32) - define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f32: ; CHECK: # %bb.0: @@ -88,8 +82,6 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 ret float %r } -declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f32: ; CHECK: # %bb.0: @@ -116,8 +108,6 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 ret float %r } -declare float @llvm.vp.reduce.fadd.v64f32(float, <64 x float>, <64 x i1>, i32) - define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v64f32: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, ret float %r } -declare double @llvm.vp.reduce.fadd.v2f64(double, <2 x double>, <2 x i1>, i32) - define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f64: ; CHECK: # %bb.0: @@ -202,8 +190,6 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, ret double %r } -declare double @llvm.vp.reduce.fadd.v3f64(double, <3 x double>, <3 x i1>, i32) - define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_fadd_v3f64: ; CHECK: # %bb.0: @@ -230,8 +216,6 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, ret double %r } -declare double @llvm.vp.reduce.fadd.v4f64(double, <4 x double>, <4 x i1>, i32) - define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll index eec12212d0d37..ffbf1c7a548e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>) - define half @vreduce_fadd_v1f16(<1 x half> %v, half %s) { ; CHECK-LABEL: vreduce_fadd_v1f16: ; CHECK: # %bb.0: @@ -27,8 +25,6 @@ define half @vreduce_ord_fadd_v1f16(<1 x half> %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v2f16(half, <2 x half>) - define half @vreduce_fadd_v2f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v2f16: ; CHECK: # %bb.0: @@ -57,8 +53,6 @@ define half @vreduce_ord_fadd_v2f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>) - define half @vreduce_fadd_v4f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v4f16: ; CHECK: # %bb.0: @@ -87,8 +81,6 @@ define half @vreduce_ord_fadd_v4f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v7f16(half, <7 x half>) - define half @vreduce_fadd_v7f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v7f16: ; CHECK: # %bb.0: @@ -103,8 +95,6 @@ define half 
@vreduce_fadd_v7f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>) - define half @vreduce_fadd_v8f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v8f16: ; CHECK: # %bb.0: @@ -133,8 +123,6 @@ define half @vreduce_ord_fadd_v8f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>) - define half @vreduce_fadd_v16f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v16f16: ; CHECK: # %bb.0: @@ -163,8 +151,6 @@ define half @vreduce_ord_fadd_v16f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>) - define half @vreduce_fadd_v32f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v32f16: ; CHECK: # %bb.0: @@ -195,8 +181,6 @@ define half @vreduce_ord_fadd_v32f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>) - define half @vreduce_fadd_v64f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v64f16: ; CHECK: # %bb.0: @@ -227,8 +211,6 @@ define half @vreduce_ord_fadd_v64f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>) - define half @vreduce_fadd_v128f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v128f16: ; CHECK: # %bb.0: @@ -265,8 +247,6 @@ define half @vreduce_ord_fadd_v128f16(ptr %x, half %s) { ret half %red } -declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>) - define float @vreduce_fadd_v1f32(<1 x float> %v, float %s) { ; CHECK-LABEL: vreduce_fadd_v1f32: ; CHECK: # %bb.0: @@ -319,8 +299,6 @@ define float @vreduce_ord_fwadd_v1f32(<1 x half> %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>) - define float @vreduce_fadd_v2f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v2f32: ; CHECK: # %bb.0: @@ -383,8 +361,6 @@ define float @vreduce_ord_fwadd_v2f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>) 
- define float @vreduce_fadd_v4f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v4f32: ; CHECK: # %bb.0: @@ -447,8 +423,6 @@ define float @vreduce_ord_fwadd_v4f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v7f32(float, <7 x float>) - define float @vreduce_fadd_v7f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v7f32: ; CHECK: # %bb.0: @@ -521,9 +495,6 @@ define float @vreduce_fadd_v7f32_neutralstart_fast(ptr %x) { ret float %red } - -declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>) - define float @vreduce_fadd_v8f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v8f32: ; CHECK: # %bb.0: @@ -586,8 +557,6 @@ define float @vreduce_ord_fwadd_v8f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>) - define float @vreduce_fadd_v16f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v16f32: ; CHECK: # %bb.0: @@ -650,8 +619,6 @@ define float @vreduce_ord_fwadd_v16f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>) - define float @vreduce_fadd_v32f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v32f32: ; CHECK: # %bb.0: @@ -718,8 +685,6 @@ define float @vreduce_ord_fwadd_v32f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>) - define float @vreduce_fadd_v64f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v64f32: ; CHECK: # %bb.0: @@ -801,8 +766,6 @@ define float @vreduce_ord_fwadd_v64f32(ptr %x, float %s) { ret float %red } -declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>) - define double @vreduce_fadd_v1f64(<1 x double> %v, double %s) { ; CHECK-LABEL: vreduce_fadd_v1f64: ; CHECK: # %bb.0: @@ -855,8 +818,6 @@ define double @vreduce_ord_fwadd_v1f64(<1 x float> %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>) - define double @vreduce_fadd_v2f64(ptr %x, double %s) { ; 
CHECK-LABEL: vreduce_fadd_v2f64: ; CHECK: # %bb.0: @@ -919,8 +880,6 @@ define double @vreduce_ord_fwadd_v2f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>) - define double @vreduce_fadd_v4f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v4f64: ; CHECK: # %bb.0: @@ -983,8 +942,6 @@ define double @vreduce_ord_fwadd_v4f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>) - define double @vreduce_fadd_v8f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v8f64: ; CHECK: # %bb.0: @@ -1047,8 +1004,6 @@ define double @vreduce_ord_fwadd_v8f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>) - define double @vreduce_fadd_v16f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v16f64: ; CHECK: # %bb.0: @@ -1111,8 +1066,6 @@ define double @vreduce_ord_fwadd_v16f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>) - define double @vreduce_fadd_v32f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v32f64: ; CHECK: # %bb.0: @@ -1190,8 +1143,6 @@ define double @vreduce_ord_fwadd_v32f64(ptr %x, double %s) { ret double %red } -declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>) - define half @vreduce_fmin_v2f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f16: ; CHECK: # %bb.0: @@ -1205,8 +1156,6 @@ define half @vreduce_fmin_v2f16(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>) - define half @vreduce_fmin_v4f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f16: ; CHECK: # %bb.0: @@ -1246,8 +1195,6 @@ define half @vreduce_fmin_v4f16_nonans_noinfs(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmin.v128f16(<128 x half>) - define half @vreduce_fmin_v128f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v128f16: ; CHECK: # %bb.0: @@ -1265,8 +1212,6 @@ define half @vreduce_fmin_v128f16(ptr %x) { ret half %red } 
-declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>) - define float @vreduce_fmin_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f32: ; CHECK: # %bb.0: @@ -1280,8 +1225,6 @@ define float @vreduce_fmin_v2f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) - define float @vreduce_fmin_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f32: ; CHECK: # %bb.0: @@ -1321,8 +1264,6 @@ define float @vreduce_fmin_v4f32_nonans_noinfs(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v7f32(<7 x float>) - define float @vreduce_fmin_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v7f32: ; CHECK: # %bb.0: @@ -1338,8 +1279,6 @@ define float @vreduce_fmin_v7f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v128f32(<128 x float>) - define float @vreduce_fmin_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v128f32: ; CHECK: # %bb.0: @@ -1363,8 +1302,6 @@ define float @vreduce_fmin_v128f32(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>) - define double @vreduce_fmin_v2f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f64: ; CHECK: # %bb.0: @@ -1378,8 +1315,6 @@ define double @vreduce_fmin_v2f64(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>) - define double @vreduce_fmin_v4f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f64: ; CHECK: # %bb.0: @@ -1419,8 +1354,6 @@ define double @vreduce_fmin_v4f64_nonans_noinfs(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmin.v32f64(<32 x double>) - define double @vreduce_fmin_v32f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v32f64: ; CHECK: # %bb.0: @@ -1437,8 +1370,6 @@ define double @vreduce_fmin_v32f64(ptr %x) { ret double %red } -declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>) - define half @vreduce_fmax_v2f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f16: ; CHECK: # %bb.0: @@ -1452,8 +1383,6 @@ define half @vreduce_fmax_v2f16(ptr %x) { ret half %red } -declare half 
@llvm.vector.reduce.fmax.v4f16(<4 x half>) - define half @vreduce_fmax_v4f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f16: ; CHECK: # %bb.0: @@ -1493,8 +1422,6 @@ define half @vreduce_fmax_v4f16_nonans_noinfs(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmax.v128f16(<128 x half>) - define half @vreduce_fmax_v128f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v128f16: ; CHECK: # %bb.0: @@ -1512,8 +1439,6 @@ define half @vreduce_fmax_v128f16(ptr %x) { ret half %red } -declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>) - define float @vreduce_fmax_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f32: ; CHECK: # %bb.0: @@ -1527,8 +1452,6 @@ define float @vreduce_fmax_v2f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) - define float @vreduce_fmax_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f32: ; CHECK: # %bb.0: @@ -1568,8 +1491,6 @@ define float @vreduce_fmax_v4f32_nonans_noinfs(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v7f32(<7 x float>) - define float @vreduce_fmax_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v7f32: ; CHECK: # %bb.0: @@ -1585,8 +1506,6 @@ define float @vreduce_fmax_v7f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v128f32(<128 x float>) - define float @vreduce_fmax_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v128f32: ; CHECK: # %bb.0: @@ -1610,8 +1529,6 @@ define float @vreduce_fmax_v128f32(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>) - define double @vreduce_fmax_v2f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f64: ; CHECK: # %bb.0: @@ -1625,8 +1542,6 @@ define double @vreduce_fmax_v2f64(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>) - define double @vreduce_fmax_v4f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f64: ; CHECK: # %bb.0: @@ -1666,8 +1581,6 @@ define double @vreduce_fmax_v4f64_nonans_noinfs(ptr %x) { ret double %red } -declare double 
@llvm.vector.reduce.fmax.v32f64(<32 x double>) - define double @vreduce_fmax_v32f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v32f64: ; CHECK: # %bb.0: @@ -1698,8 +1611,6 @@ define float @vreduce_nsz_fadd_v4f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v2f32(<2 x float>) - define float @vreduce_fminimum_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v2f32: ; CHECK: # %bb.0: @@ -1734,8 +1645,6 @@ define float @vreduce_fminimum_v2f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v4f32(<4 x float>) - define float @vreduce_fminimum_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v4f32: ; CHECK: # %bb.0: @@ -1770,8 +1679,6 @@ define float @vreduce_fminimum_v4f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v7f32(<7 x float>) - define float @vreduce_fminimum_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v7f32: ; CHECK: # %bb.0: @@ -1810,8 +1717,6 @@ define float @vreduce_fminimum_v7f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v8f32(<8 x float>) - define float @vreduce_fminimum_v8f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v8f32: ; CHECK: # %bb.0: @@ -1846,8 +1751,6 @@ define float @vreduce_fminimum_v8f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v16f32(<16 x float>) - define float @vreduce_fminimum_v16f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v16f32: ; CHECK: # %bb.0: @@ -1882,8 +1785,6 @@ define float @vreduce_fminimum_v16f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v32f32(<32 x float>) - define float @vreduce_fminimum_v32f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v32f32: ; CHECK: # %bb.0: @@ -1920,8 +1821,6 @@ define float @vreduce_fminimum_v32f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v64f32(<64 x float>) - define float @vreduce_fminimum_v64f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v64f32: ; CHECK: # %bb.0: 
@@ -1969,8 +1868,6 @@ define float @vreduce_fminimum_v64f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v128f32(<128 x float>) - define float @vreduce_fminimum_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v128f32: ; CHECK: # %bb.0: @@ -2080,8 +1977,6 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fminimum.v2f64(<2 x double>) - define double @vreduce_fminimum_v2f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v2f64: ; RV32: # %bb.0: @@ -2133,8 +2028,6 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v4f64(<4 x double>) - define double @vreduce_fminimum_v4f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v4f64: ; RV32: # %bb.0: @@ -2186,8 +2079,6 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v8f64(<8 x double>) - define double @vreduce_fminimum_v8f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v8f64: ; RV32: # %bb.0: @@ -2239,8 +2130,6 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v16f64(<16 x double>) - define double @vreduce_fminimum_v16f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v16f64: ; RV32: # %bb.0: @@ -2292,8 +2181,6 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v32f64(<32 x double>) - define double @vreduce_fminimum_v32f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v32f64: ; RV32: # %bb.0: @@ -2364,8 +2251,6 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v64f64(<64 x double>) - define double @vreduce_fminimum_v64f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v64f64: ; RV32: # %bb.0: @@ -2554,8 +2439,6 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) { ret double %red } -declare float 
@llvm.vector.reduce.fmaximum.v2f32(<2 x float>) - define float @vreduce_fmaximum_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v2f32: ; CHECK: # %bb.0: @@ -2590,8 +2473,6 @@ define float @vreduce_fmaximum_v2f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v4f32(<4 x float>) - define float @vreduce_fmaximum_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v4f32: ; CHECK: # %bb.0: @@ -2626,8 +2507,6 @@ define float @vreduce_fmaximum_v4f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v7f32(<7 x float>) - define float @vreduce_fmaximum_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v7f32: ; CHECK: # %bb.0: @@ -2666,8 +2545,6 @@ define float @vreduce_fmaximum_v7f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v8f32(<8 x float>) - define float @vreduce_fmaximum_v8f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v8f32: ; CHECK: # %bb.0: @@ -2702,8 +2579,6 @@ define float @vreduce_fmaximum_v8f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v16f32(<16 x float>) - define float @vreduce_fmaximum_v16f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v16f32: ; CHECK: # %bb.0: @@ -2738,8 +2613,6 @@ define float @vreduce_fmaximum_v16f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v32f32(<32 x float>) - define float @vreduce_fmaximum_v32f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v32f32: ; CHECK: # %bb.0: @@ -2776,8 +2649,6 @@ define float @vreduce_fmaximum_v32f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v64f32(<64 x float>) - define float @vreduce_fmaximum_v64f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v64f32: ; CHECK: # %bb.0: @@ -2825,8 +2696,6 @@ define float @vreduce_fmaximum_v64f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v128f32(<128 x float>) - define float @vreduce_fmaximum_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v128f32: ; 
CHECK: # %bb.0: @@ -2936,8 +2805,6 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>) - define double @vreduce_fmaximum_v2f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v2f64: ; RV32: # %bb.0: @@ -2989,8 +2856,6 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v4f64(<4 x double>) - define double @vreduce_fmaximum_v4f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v4f64: ; RV32: # %bb.0: @@ -3042,8 +2907,6 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v8f64(<8 x double>) - define double @vreduce_fmaximum_v8f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v8f64: ; RV32: # %bb.0: @@ -3095,8 +2958,6 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v16f64(<16 x double>) - define double @vreduce_fmaximum_v16f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v16f64: ; RV32: # %bb.0: @@ -3148,8 +3009,6 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v32f64(<32 x double>) - define double @vreduce_fmaximum_v32f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v32f64: ; RV32: # %bb.0: @@ -3220,8 +3079,6 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v64f64(<64 x double>) - define double @vreduce_fmaximum_v64f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v64f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll index dfe8f358b7782..3e77020ed0213 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v 
-verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umax.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i8: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smax.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i8: ; CHECK: # %bb.0: @@ -49,8 +43,6 @@ define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i8: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smin.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i8: ; CHECK: # %bb.0: @@ -79,8 +69,6 @@ define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.and.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define signext i8 
@vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.or.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i8: ; CHECK: # %bb.0: @@ -109,8 +95,6 @@ define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 ret i8 %r } -declare i8 @llvm.vp.reduce.xor.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i8: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v3i8(i8, <3 x i8>, <3 x i1>, i32) - define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v3i8: ; CHECK: # %bb.0: @@ -139,8 +121,6 @@ define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i8: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umax.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i8: ; CHECK: # %bb.0: @@ -169,8 +147,6 @@ define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smax.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i8: ; CHECK: # %bb.0: @@ -184,8 +160,6 @@ define signext i8 
@vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i8: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smin.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i8: ; CHECK: # %bb.0: @@ -214,8 +186,6 @@ define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.and.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i8: ; CHECK: # %bb.0: @@ -229,8 +199,6 @@ define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.or.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i8: ; CHECK: # %bb.0: @@ -244,8 +212,6 @@ define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 ret i8 %r } -declare i8 @llvm.vp.reduce.xor.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i8: ; CHECK: # %bb.0: @@ -259,8 +225,6 @@ define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i16 @llvm.vp.reduce.add.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i16: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define signext i16 
@vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.umax.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i16: ; CHECK: # %bb.0: @@ -289,8 +251,6 @@ define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smax.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i16: ; CHECK: # %bb.0: @@ -304,8 +264,6 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.umin.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i16: ; CHECK: # %bb.0: @@ -319,8 +277,6 @@ define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smin.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i16: ; CHECK: # %bb.0: @@ -334,8 +290,6 @@ define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.and.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i16: ; CHECK: # %bb.0: @@ -349,8 +303,6 @@ define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.or.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i16: ; 
CHECK: # %bb.0: @@ -364,8 +316,6 @@ define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, ret i16 %r } -declare i16 @llvm.vp.reduce.xor.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i16: ; CHECK: # %bb.0: @@ -379,8 +329,6 @@ define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.add.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i16: ; CHECK: # %bb.0: @@ -394,8 +342,6 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.umax.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i16: ; CHECK: # %bb.0: @@ -409,8 +355,6 @@ define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smax.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i16: ; CHECK: # %bb.0: @@ -424,8 +368,6 @@ define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.umin.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i16: ; CHECK: # %bb.0: @@ -439,8 +381,6 @@ define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smin.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i16: ; CHECK: # %bb.0: @@ -454,8 +394,6 @@ define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.and.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i16: ; CHECK: # %bb.0: @@ -469,8 +407,6 @@ define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.or.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i16: ; CHECK: # %bb.0: @@ -484,8 +420,6 @@ define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, ret i16 %r } -declare i16 @llvm.vp.reduce.xor.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i16: ; CHECK: # %bb.0: @@ -499,8 +433,6 @@ define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i32 @llvm.vp.reduce.add.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i32: ; CHECK: # %bb.0: @@ -514,8 +446,6 @@ define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.umax.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i32: ; CHECK: # %bb.0: @@ -529,8 +459,6 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 
@vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i32: ; CHECK: # %bb.0: @@ -544,8 +472,6 @@ define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.umin.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i32: ; CHECK: # %bb.0: @@ -559,8 +485,6 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smin.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i32: ; CHECK: # %bb.0: @@ -574,8 +498,6 @@ define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.and.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i32: ; CHECK: # %bb.0: @@ -589,8 +511,6 @@ define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.or.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i32: ; CHECK: # %bb.0: @@ -604,8 +524,6 @@ define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i32: ; CHECK: # %bb.0: @@ -619,8 +537,6 @@ define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 
@llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i32: ; CHECK: # %bb.0: @@ -634,8 +550,6 @@ define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i32: ; CHECK: # %bb.0: @@ -649,8 +563,6 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i32: ; CHECK: # %bb.0: @@ -664,8 +576,6 @@ define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i32: ; CHECK: # %bb.0: @@ -679,8 +589,6 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i32: ; CHECK: # %bb.0: @@ -694,8 +602,6 @@ define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i32: ; CHECK: # %bb.0: @@ -709,8 +615,6 @@ define signext i32 @vpreduce_and_v4i32(i32 
signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i32: ; CHECK: # %bb.0: @@ -724,8 +628,6 @@ define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i32: ; CHECK: # %bb.0: @@ -739,8 +641,6 @@ define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32) - define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v64i32: ; CHECK: # %bb.0: @@ -769,8 +669,6 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> ret i32 %r } -declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_v2i64: ; RV32: # %bb.0: @@ -804,8 +702,6 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.umax.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v2i64: ; RV32: # %bb.0: @@ -839,8 +735,6 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smax.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_v2i64: ; RV32: # %bb.0: @@ -874,8 
+768,6 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.umin.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v2i64: ; RV32: # %bb.0: @@ -909,8 +801,6 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smin.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_v2i64: ; RV32: # %bb.0: @@ -944,8 +834,6 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.and.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_v2i64: ; RV32: # %bb.0: @@ -979,8 +867,6 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.or.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_v2i64: ; RV32: # %bb.0: @@ -1014,8 +900,6 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, ret i64 %r } -declare i64 @llvm.vp.reduce.xor.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_v2i64: ; RV32: # %bb.0: @@ -1049,8 +933,6 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vpreduce_add_v4i64: ; RV32: # %bb.0: @@ -1084,8 +966,6 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.umax.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v4i64: ; RV32: # %bb.0: @@ -1119,8 +999,6 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smax.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_v4i64: ; RV32: # %bb.0: @@ -1154,8 +1032,6 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.umin.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v4i64: ; RV32: # %bb.0: @@ -1189,8 +1065,6 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smin.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_v4i64: ; RV32: # %bb.0: @@ -1224,8 +1098,6 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.and.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_v4i64: ; RV32: # %bb.0: @@ -1259,8 +1131,6 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.or.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x 
i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_v4i64: ; RV32: # %bb.0: @@ -1294,8 +1164,6 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, ret i64 %r } -declare i64 @llvm.vp.reduce.xor.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_v4i64: ; RV32: # %bb.0: @@ -1329,8 +1197,6 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i8 @llvm.vp.reduce.mul.v1i8(i8, <1 x i8>, <1 x i1>, i32) - define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v1i8: ; RV32: # %bb.0: @@ -1381,8 +1247,6 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl) ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v2i8: ; RV32: # %bb.0: @@ -1441,8 +1305,6 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v4i8: ; RV32: # %bb.0: @@ -1505,8 +1367,6 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v8i8(i8, <8 x i8>, <8 x i1>, i32) - define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v8i8: ; RV32: # %bb.0: @@ -1573,8 +1433,6 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v16i8(i8, <16 x i8>, <16 x i1>, i32) - define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x 
i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v16i8: ; RV32: # %bb.0: @@ -1645,8 +1503,6 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m, ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v32i8(i8, <32 x i8>, <32 x i1>, i32) - define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v32i8: ; RV32: # %bb.0: @@ -1723,8 +1579,6 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m, ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v64i8(i8, <64 x i8>, <64 x i1>, i32) - define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v64i8: ; RV32: # %bb.0: @@ -1830,7 +1684,6 @@ define zeroext i8 @front_ele_v4i8(<4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { } ; Test start value is the first element of a vector which longer than M1. -declare i8 @llvm.vp.reduce.and.v32i8(i8, <32 x i8>, <32 x i1>, i32) define zeroext i8 @front_ele_v32i8(<32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: front_ele_v32i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll index d3a36525115c8..9725bb37c679b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>) - define i8 @vreduce_add_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_add_v1i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define i8 @vreduce_add_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>) - define i8 @vreduce_add_v2i8(ptr %x) { ; 
CHECK-LABEL: vreduce_add_v2i8: ; CHECK: # %bb.0: @@ -30,8 +26,6 @@ define i8 @vreduce_add_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>) - define i8 @vreduce_add_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v3i8: ; CHECK: # %bb.0: @@ -46,8 +40,6 @@ define i8 @vreduce_add_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>) - define i8 @vreduce_add_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i8: ; CHECK: # %bb.0: @@ -62,8 +54,6 @@ define i8 @vreduce_add_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) - define i8 @vreduce_add_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define i8 @vreduce_add_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) - define i8 @vreduce_add_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define i8 @vreduce_add_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>) - define i8 @vreduce_add_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i8: ; CHECK: # %bb.0: @@ -111,8 +97,6 @@ define i8 @vreduce_add_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>) - define i8 @vreduce_add_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i8: ; CHECK: # %bb.0: @@ -128,8 +112,6 @@ define i8 @vreduce_add_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>) - define i8 @vreduce_add_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v128i8: ; CHECK: # %bb.0: @@ -145,8 +127,6 @@ define i8 @vreduce_add_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v256i8(<256 x i8>) - define i8 @vreduce_add_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v256i8: ; CHECK: # %bb.0: @@ -165,8 +145,6 @@ define i8 @vreduce_add_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16>) - define i16 @vreduce_add_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_add_v1i16: ; 
CHECK: # %bb.0: @@ -201,8 +179,6 @@ define i16 @vwreduce_uadd_v1i16(<1 x i8> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>) - define i16 @vreduce_add_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v2i16: ; CHECK: # %bb.0: @@ -251,8 +227,6 @@ define i16 @vwreduce_uadd_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) - define i16 @vreduce_add_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i16: ; CHECK: # %bb.0: @@ -301,8 +275,6 @@ define i16 @vwreduce_uadd_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) - define i16 @vreduce_add_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i16: ; CHECK: # %bb.0: @@ -351,8 +323,6 @@ define i16 @vwreduce_uadd_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) - define i16 @vreduce_add_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i16: ; CHECK: # %bb.0: @@ -401,8 +371,6 @@ define i16 @vwreduce_uadd_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>) - define i16 @vreduce_add_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i16: ; CHECK: # %bb.0: @@ -454,8 +422,6 @@ define i16 @vwreduce_uadd_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>) - define i16 @vreduce_add_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i16: ; CHECK: # %bb.0: @@ -507,8 +473,6 @@ define i16 @vwreduce_uadd_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>) - define i16 @vreduce_add_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v128i16: ; CHECK: # %bb.0: @@ -571,8 +535,6 @@ define i16 @vwreduce_uadd_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32>) - define i32 @vreduce_add_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_add_v1i32: ; CHECK: # %bb.0: @@ -607,8 +569,6 @@ define i32 @vwreduce_uadd_v1i32(<1 x i16> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) - define 
i32 @vreduce_add_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v2i32: ; CHECK: # %bb.0: @@ -657,8 +617,6 @@ define i32 @vwreduce_uadd_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) - define i32 @vreduce_add_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i32: ; CHECK: # %bb.0: @@ -707,8 +665,6 @@ define i32 @vwreduce_uadd_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) - define i32 @vreduce_add_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i32: ; CHECK: # %bb.0: @@ -757,8 +713,6 @@ define i32 @vwreduce_uadd_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) - define i32 @vreduce_add_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i32: ; CHECK: # %bb.0: @@ -807,8 +761,6 @@ define i32 @vwreduce_uadd_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>) - define i32 @vreduce_add_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i32: ; CHECK: # %bb.0: @@ -860,8 +812,6 @@ define i32 @vwreduce_uadd_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>) - define i32 @vreduce_add_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i32: ; CHECK: # %bb.0: @@ -924,8 +874,6 @@ define i32 @vwreduce_uadd_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>) - define i64 @vreduce_add_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_add_v1i64: ; RV32: # %bb.0: @@ -989,8 +937,6 @@ define i64 @vwreduce_uadd_v1i64(<1 x i32> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) - define i64 @vreduce_add_v2i64(ptr %x) { ; RV32-LABEL: vreduce_add_v2i64: ; RV32: # %bb.0: @@ -1080,8 +1026,6 @@ define i64 @vwreduce_uadd_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) - define i64 @vreduce_add_v4i64(ptr %x) { ; RV32-LABEL: vreduce_add_v4i64: ; RV32: # %bb.0: @@ -1171,8 +1115,6 @@ define i64 @vwreduce_uadd_v4i64(ptr %x) { ret i64 %red } -declare i64 
@llvm.vector.reduce.add.v8i64(<8 x i64>) - define i64 @vreduce_add_v8i64(ptr %x) { ; RV32-LABEL: vreduce_add_v8i64: ; RV32: # %bb.0: @@ -1262,8 +1204,6 @@ define i64 @vwreduce_uadd_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>) - define i64 @vreduce_add_v16i64(ptr %x) { ; RV32-LABEL: vreduce_add_v16i64: ; RV32: # %bb.0: @@ -1353,8 +1293,6 @@ define i64 @vwreduce_uadd_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>) - define i64 @vreduce_add_v32i64(ptr %x) { ; RV32-LABEL: vreduce_add_v32i64: ; RV32: # %bb.0: @@ -1466,8 +1404,6 @@ define i64 @vwreduce_uadd_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v64i64(<64 x i64>) - define i64 @vreduce_add_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_add_v64i64: ; RV32: # %bb.0: @@ -1645,8 +1581,6 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) { ret i64 %red } -declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>) - define i8 @vreduce_and_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_and_v1i8: ; CHECK: # %bb.0: @@ -1657,8 +1591,6 @@ define i8 @vreduce_and_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>) - define i8 @vreduce_and_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i8: ; CHECK: # %bb.0: @@ -1672,8 +1604,6 @@ define i8 @vreduce_and_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>) - define i8 @vreduce_and_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v3i8: ; CHECK: # %bb.0: @@ -1689,9 +1619,6 @@ define i8 @vreduce_and_v3i8(ptr %x) { ret i8 %red } - -declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>) - define i8 @vreduce_and_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i8: ; CHECK: # %bb.0: @@ -1705,8 +1632,6 @@ define i8 @vreduce_and_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>) - define i8 @vreduce_and_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i8: ; CHECK: # %bb.0: @@ -1720,8 +1645,6 @@ define i8 @vreduce_and_v8i8(ptr %x) { ret i8 
%red } -declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>) - define i8 @vreduce_and_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v16i8: ; CHECK: # %bb.0: @@ -1735,8 +1658,6 @@ define i8 @vreduce_and_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>) - define i8 @vreduce_and_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i8: ; CHECK: # %bb.0: @@ -1751,8 +1672,6 @@ define i8 @vreduce_and_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>) - define i8 @vreduce_and_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i8: ; CHECK: # %bb.0: @@ -1767,8 +1686,6 @@ define i8 @vreduce_and_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>) - define i8 @vreduce_and_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v128i8: ; CHECK: # %bb.0: @@ -1783,8 +1700,6 @@ define i8 @vreduce_and_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v256i8(<256 x i8>) - define i8 @vreduce_and_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v256i8: ; CHECK: # %bb.0: @@ -1802,8 +1717,6 @@ define i8 @vreduce_and_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.and.v1i16(<1 x i16>) - define i16 @vreduce_and_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_and_v1i16: ; CHECK: # %bb.0: @@ -1814,8 +1727,6 @@ define i16 @vreduce_and_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>) - define i16 @vreduce_and_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i16: ; CHECK: # %bb.0: @@ -1829,8 +1740,6 @@ define i16 @vreduce_and_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>) - define i16 @vreduce_and_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i16: ; CHECK: # %bb.0: @@ -1844,8 +1753,6 @@ define i16 @vreduce_and_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>) - define i16 @vreduce_and_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i16: ; CHECK: # %bb.0: @@ -1859,8 +1766,6 @@ define i16 
@vreduce_and_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>) - define i16 @vreduce_and_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v16i16: ; CHECK: # %bb.0: @@ -1874,8 +1779,6 @@ define i16 @vreduce_and_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v32i16(<32 x i16>) - define i16 @vreduce_and_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i16: ; CHECK: # %bb.0: @@ -1890,8 +1793,6 @@ define i16 @vreduce_and_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v64i16(<64 x i16>) - define i16 @vreduce_and_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i16: ; CHECK: # %bb.0: @@ -1906,8 +1807,6 @@ define i16 @vreduce_and_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v128i16(<128 x i16>) - define i16 @vreduce_and_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v128i16: ; CHECK: # %bb.0: @@ -1925,8 +1824,6 @@ define i16 @vreduce_and_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32>) - define i32 @vreduce_and_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_and_v1i32: ; CHECK: # %bb.0: @@ -1937,8 +1834,6 @@ define i32 @vreduce_and_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>) - define i32 @vreduce_and_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i32: ; CHECK: # %bb.0: @@ -1952,8 +1847,6 @@ define i32 @vreduce_and_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>) - define i32 @vreduce_and_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i32: ; CHECK: # %bb.0: @@ -1967,8 +1860,6 @@ define i32 @vreduce_and_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>) - define i32 @vreduce_and_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i32: ; CHECK: # %bb.0: @@ -1982,8 +1873,6 @@ define i32 @vreduce_and_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>) - define i32 @vreduce_and_v16i32(ptr %x) { ; CHECK-LABEL: 
vreduce_and_v16i32: ; CHECK: # %bb.0: @@ -1997,8 +1886,6 @@ define i32 @vreduce_and_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v32i32(<32 x i32>) - define i32 @vreduce_and_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i32: ; CHECK: # %bb.0: @@ -2013,8 +1900,6 @@ define i32 @vreduce_and_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v64i32(<64 x i32>) - define i32 @vreduce_and_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i32: ; CHECK: # %bb.0: @@ -2032,8 +1917,6 @@ define i32 @vreduce_and_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>) - define i64 @vreduce_and_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_and_v1i64: ; RV32: # %bb.0: @@ -2053,8 +1936,6 @@ define i64 @vreduce_and_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>) - define i64 @vreduce_and_v2i64(ptr %x) { ; RV32-LABEL: vreduce_and_v2i64: ; RV32: # %bb.0: @@ -2080,8 +1961,6 @@ define i64 @vreduce_and_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) - define i64 @vreduce_and_v4i64(ptr %x) { ; RV32-LABEL: vreduce_and_v4i64: ; RV32: # %bb.0: @@ -2107,8 +1986,6 @@ define i64 @vreduce_and_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>) - define i64 @vreduce_and_v8i64(ptr %x) { ; RV32-LABEL: vreduce_and_v8i64: ; RV32: # %bb.0: @@ -2134,8 +2011,6 @@ define i64 @vreduce_and_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>) - define i64 @vreduce_and_v16i64(ptr %x) { ; RV32-LABEL: vreduce_and_v16i64: ; RV32: # %bb.0: @@ -2161,8 +2036,6 @@ define i64 @vreduce_and_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v32i64(<32 x i64>) - define i64 @vreduce_and_v32i64(ptr %x) { ; RV32-LABEL: vreduce_and_v32i64: ; RV32: # %bb.0: @@ -2194,8 +2067,6 @@ define i64 @vreduce_and_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v64i64(<64 x i64>) - 
define i64 @vreduce_and_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_and_v64i64: ; RV32: # %bb.0: @@ -2239,8 +2110,6 @@ define i64 @vreduce_and_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>) - define i8 @vreduce_or_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_or_v1i8: ; CHECK: # %bb.0: @@ -2251,8 +2120,6 @@ define i8 @vreduce_or_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>) - define i8 @vreduce_or_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i8: ; CHECK: # %bb.0: @@ -2266,8 +2133,6 @@ define i8 @vreduce_or_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>) - define i8 @vreduce_or_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v3i8: ; CHECK: # %bb.0: @@ -2282,8 +2147,6 @@ define i8 @vreduce_or_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>) - define i8 @vreduce_or_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i8: ; CHECK: # %bb.0: @@ -2297,8 +2160,6 @@ define i8 @vreduce_or_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>) - define i8 @vreduce_or_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i8: ; CHECK: # %bb.0: @@ -2312,8 +2173,6 @@ define i8 @vreduce_or_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>) - define i8 @vreduce_or_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i8: ; CHECK: # %bb.0: @@ -2327,8 +2186,6 @@ define i8 @vreduce_or_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>) - define i8 @vreduce_or_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i8: ; CHECK: # %bb.0: @@ -2343,8 +2200,6 @@ define i8 @vreduce_or_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v64i8(<64 x i8>) - define i8 @vreduce_or_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i8: ; CHECK: # %bb.0: @@ -2359,8 +2214,6 @@ define i8 @vreduce_or_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v128i8(<128 x i8>) - define i8 @vreduce_or_v128i8(ptr 
%x) { ; CHECK-LABEL: vreduce_or_v128i8: ; CHECK: # %bb.0: @@ -2375,8 +2228,6 @@ define i8 @vreduce_or_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v256i8(<256 x i8>) - define i8 @vreduce_or_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v256i8: ; CHECK: # %bb.0: @@ -2394,8 +2245,6 @@ define i8 @vreduce_or_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.or.v1i16(<1 x i16>) - define i16 @vreduce_or_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_or_v1i16: ; CHECK: # %bb.0: @@ -2406,8 +2255,6 @@ define i16 @vreduce_or_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>) - define i16 @vreduce_or_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i16: ; CHECK: # %bb.0: @@ -2421,8 +2268,6 @@ define i16 @vreduce_or_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>) - define i16 @vreduce_or_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i16: ; CHECK: # %bb.0: @@ -2436,8 +2281,6 @@ define i16 @vreduce_or_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>) - define i16 @vreduce_or_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i16: ; CHECK: # %bb.0: @@ -2451,8 +2294,6 @@ define i16 @vreduce_or_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>) - define i16 @vreduce_or_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i16: ; CHECK: # %bb.0: @@ -2466,8 +2307,6 @@ define i16 @vreduce_or_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v32i16(<32 x i16>) - define i16 @vreduce_or_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i16: ; CHECK: # %bb.0: @@ -2482,8 +2321,6 @@ define i16 @vreduce_or_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v64i16(<64 x i16>) - define i16 @vreduce_or_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i16: ; CHECK: # %bb.0: @@ -2498,8 +2335,6 @@ define i16 @vreduce_or_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v128i16(<128 x i16>) - define i16 
@vreduce_or_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v128i16: ; CHECK: # %bb.0: @@ -2517,8 +2352,6 @@ define i16 @vreduce_or_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32>) - define i32 @vreduce_or_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_or_v1i32: ; CHECK: # %bb.0: @@ -2529,8 +2362,6 @@ define i32 @vreduce_or_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>) - define i32 @vreduce_or_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i32: ; CHECK: # %bb.0: @@ -2544,8 +2375,6 @@ define i32 @vreduce_or_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>) - define i32 @vreduce_or_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i32: ; CHECK: # %bb.0: @@ -2559,8 +2388,6 @@ define i32 @vreduce_or_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>) - define i32 @vreduce_or_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i32: ; CHECK: # %bb.0: @@ -2574,8 +2401,6 @@ define i32 @vreduce_or_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>) - define i32 @vreduce_or_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i32: ; CHECK: # %bb.0: @@ -2589,8 +2414,6 @@ define i32 @vreduce_or_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v32i32(<32 x i32>) - define i32 @vreduce_or_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i32: ; CHECK: # %bb.0: @@ -2605,8 +2428,6 @@ define i32 @vreduce_or_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v64i32(<64 x i32>) - define i32 @vreduce_or_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i32: ; CHECK: # %bb.0: @@ -2624,8 +2445,6 @@ define i32 @vreduce_or_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>) - define i64 @vreduce_or_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_or_v1i64: ; RV32: # %bb.0: @@ -2645,8 +2464,6 @@ define i64 @vreduce_or_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 
@llvm.vector.reduce.or.v2i64(<2 x i64>) - define i64 @vreduce_or_v2i64(ptr %x) { ; RV32-LABEL: vreduce_or_v2i64: ; RV32: # %bb.0: @@ -2672,8 +2489,6 @@ define i64 @vreduce_or_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) - define i64 @vreduce_or_v4i64(ptr %x) { ; RV32-LABEL: vreduce_or_v4i64: ; RV32: # %bb.0: @@ -2699,8 +2514,6 @@ define i64 @vreduce_or_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>) - define i64 @vreduce_or_v8i64(ptr %x) { ; RV32-LABEL: vreduce_or_v8i64: ; RV32: # %bb.0: @@ -2726,8 +2539,6 @@ define i64 @vreduce_or_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>) - define i64 @vreduce_or_v16i64(ptr %x) { ; RV32-LABEL: vreduce_or_v16i64: ; RV32: # %bb.0: @@ -2753,8 +2564,6 @@ define i64 @vreduce_or_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v32i64(<32 x i64>) - define i64 @vreduce_or_v32i64(ptr %x) { ; RV32-LABEL: vreduce_or_v32i64: ; RV32: # %bb.0: @@ -2786,8 +2595,6 @@ define i64 @vreduce_or_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v64i64(<64 x i64>) - define i64 @vreduce_or_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_or_v64i64: ; RV32: # %bb.0: @@ -2831,8 +2638,6 @@ define i64 @vreduce_or_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>) - define i8 @vreduce_xor_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_xor_v1i8: ; CHECK: # %bb.0: @@ -2843,8 +2648,6 @@ define i8 @vreduce_xor_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>) - define i8 @vreduce_xor_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i8: ; CHECK: # %bb.0: @@ -2859,8 +2662,6 @@ define i8 @vreduce_xor_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>) - define i8 @vreduce_xor_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v3i8: ; CHECK: # %bb.0: @@ -2875,8 +2676,6 @@ define i8 @vreduce_xor_v3i8(ptr %x) { ret i8 %red } 
-declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>) - define i8 @vreduce_xor_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i8: ; CHECK: # %bb.0: @@ -2891,8 +2690,6 @@ define i8 @vreduce_xor_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>) - define i8 @vreduce_xor_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i8: ; CHECK: # %bb.0: @@ -2907,8 +2704,6 @@ define i8 @vreduce_xor_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>) - define i8 @vreduce_xor_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i8: ; CHECK: # %bb.0: @@ -2923,8 +2718,6 @@ define i8 @vreduce_xor_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>) - define i8 @vreduce_xor_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i8: ; CHECK: # %bb.0: @@ -2940,8 +2733,6 @@ define i8 @vreduce_xor_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>) - define i8 @vreduce_xor_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i8: ; CHECK: # %bb.0: @@ -2957,8 +2748,6 @@ define i8 @vreduce_xor_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v128i8(<128 x i8>) - define i8 @vreduce_xor_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v128i8: ; CHECK: # %bb.0: @@ -2974,8 +2763,6 @@ define i8 @vreduce_xor_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v256i8(<256 x i8>) - define i8 @vreduce_xor_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v256i8: ; CHECK: # %bb.0: @@ -2994,8 +2781,6 @@ define i8 @vreduce_xor_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.xor.v1i16(<1 x i16>) - define i16 @vreduce_xor_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_xor_v1i16: ; CHECK: # %bb.0: @@ -3006,8 +2791,6 @@ define i16 @vreduce_xor_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>) - define i16 @vreduce_xor_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i16: ; CHECK: # %bb.0: @@ -3022,8 +2805,6 @@ define i16 @vreduce_xor_v2i16(ptr %x) { ret 
i16 %red } -declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>) - define i16 @vreduce_xor_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i16: ; CHECK: # %bb.0: @@ -3038,8 +2819,6 @@ define i16 @vreduce_xor_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>) - define i16 @vreduce_xor_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i16: ; CHECK: # %bb.0: @@ -3054,8 +2833,6 @@ define i16 @vreduce_xor_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>) - define i16 @vreduce_xor_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i16: ; CHECK: # %bb.0: @@ -3070,8 +2847,6 @@ define i16 @vreduce_xor_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v32i16(<32 x i16>) - define i16 @vreduce_xor_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i16: ; CHECK: # %bb.0: @@ -3087,8 +2862,6 @@ define i16 @vreduce_xor_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v64i16(<64 x i16>) - define i16 @vreduce_xor_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i16: ; CHECK: # %bb.0: @@ -3104,8 +2877,6 @@ define i16 @vreduce_xor_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v128i16(<128 x i16>) - define i16 @vreduce_xor_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v128i16: ; CHECK: # %bb.0: @@ -3124,8 +2895,6 @@ define i16 @vreduce_xor_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32>) - define i32 @vreduce_xor_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_xor_v1i32: ; CHECK: # %bb.0: @@ -3136,8 +2905,6 @@ define i32 @vreduce_xor_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>) - define i32 @vreduce_xor_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i32: ; CHECK: # %bb.0: @@ -3152,8 +2919,6 @@ define i32 @vreduce_xor_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>) - define i32 @vreduce_xor_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i32: ; CHECK: # %bb.0: @@ 
-3168,8 +2933,6 @@ define i32 @vreduce_xor_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>) - define i32 @vreduce_xor_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i32: ; CHECK: # %bb.0: @@ -3184,8 +2947,6 @@ define i32 @vreduce_xor_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>) - define i32 @vreduce_xor_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i32: ; CHECK: # %bb.0: @@ -3200,8 +2961,6 @@ define i32 @vreduce_xor_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v32i32(<32 x i32>) - define i32 @vreduce_xor_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i32: ; CHECK: # %bb.0: @@ -3217,8 +2976,6 @@ define i32 @vreduce_xor_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v64i32(<64 x i32>) - define i32 @vreduce_xor_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i32: ; CHECK: # %bb.0: @@ -3237,8 +2994,6 @@ define i32 @vreduce_xor_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>) - define i64 @vreduce_xor_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_xor_v1i64: ; RV32: # %bb.0: @@ -3258,8 +3013,6 @@ define i64 @vreduce_xor_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>) - define i64 @vreduce_xor_v2i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v2i64: ; RV32: # %bb.0: @@ -3287,8 +3040,6 @@ define i64 @vreduce_xor_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) - define i64 @vreduce_xor_v4i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v4i64: ; RV32: # %bb.0: @@ -3316,8 +3067,6 @@ define i64 @vreduce_xor_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>) - define i64 @vreduce_xor_v8i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v8i64: ; RV32: # %bb.0: @@ -3345,8 +3094,6 @@ define i64 @vreduce_xor_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>) - define i64 @vreduce_xor_v16i64(ptr %x) { ; 
RV32-LABEL: vreduce_xor_v16i64: ; RV32: # %bb.0: @@ -3374,8 +3121,6 @@ define i64 @vreduce_xor_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v32i64(<32 x i64>) - define i64 @vreduce_xor_v32i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v32i64: ; RV32: # %bb.0: @@ -3409,8 +3154,6 @@ define i64 @vreduce_xor_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v64i64(<64 x i64>) - define i64 @vreduce_xor_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_xor_v64i64: ; RV32: # %bb.0: @@ -3456,8 +3199,6 @@ define i64 @vreduce_xor_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.smin.v1i8(<1 x i8>) - define i8 @vreduce_smin_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_smin_v1i8: ; CHECK: # %bb.0: @@ -3468,8 +3209,6 @@ define i8 @vreduce_smin_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>) - define i8 @vreduce_smin_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i8: ; CHECK: # %bb.0: @@ -3483,8 +3222,6 @@ define i8 @vreduce_smin_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v3i8(<3 x i8>) - define i8 @vreduce_smin_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v3i8: ; CHECK: # %bb.0: @@ -3500,8 +3237,6 @@ define i8 @vreduce_smin_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>) - define i8 @vreduce_smin_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i8: ; CHECK: # %bb.0: @@ -3515,8 +3250,6 @@ define i8 @vreduce_smin_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>) - define i8 @vreduce_smin_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i8: ; CHECK: # %bb.0: @@ -3530,8 +3263,6 @@ define i8 @vreduce_smin_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>) - define i8 @vreduce_smin_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i8: ; CHECK: # %bb.0: @@ -3545,8 +3276,6 @@ define i8 @vreduce_smin_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>) - define 
i8 @vreduce_smin_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v32i8: ; CHECK: # %bb.0: @@ -3561,8 +3290,6 @@ define i8 @vreduce_smin_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>) - define i8 @vreduce_smin_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i8: ; CHECK: # %bb.0: @@ -3577,8 +3304,6 @@ define i8 @vreduce_smin_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>) - define i8 @vreduce_smin_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v128i8: ; CHECK: # %bb.0: @@ -3593,8 +3318,6 @@ define i8 @vreduce_smin_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v256i8(<256 x i8>) - define i8 @vreduce_smin_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v256i8: ; CHECK: # %bb.0: @@ -3612,8 +3335,6 @@ define i8 @vreduce_smin_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>) - define i16 @vreduce_smin_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_smin_v1i16: ; CHECK: # %bb.0: @@ -3624,8 +3345,6 @@ define i16 @vreduce_smin_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>) - define i16 @vreduce_smin_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i16: ; CHECK: # %bb.0: @@ -3639,8 +3358,6 @@ define i16 @vreduce_smin_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>) - define i16 @vreduce_smin_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i16: ; CHECK: # %bb.0: @@ -3654,8 +3371,6 @@ define i16 @vreduce_smin_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>) - define i16 @vreduce_smin_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i16: ; CHECK: # %bb.0: @@ -3669,8 +3384,6 @@ define i16 @vreduce_smin_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>) - define i16 @vreduce_smin_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i16: ; CHECK: # %bb.0: @@ -3684,8 +3397,6 @@ define i16 @vreduce_smin_v16i16(ptr %x) { ret 
i16 %red } -declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>) - define i16 @vreduce_smin_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v32i16: ; CHECK: # %bb.0: @@ -3700,8 +3411,6 @@ define i16 @vreduce_smin_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>) - define i16 @vreduce_smin_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i16: ; CHECK: # %bb.0: @@ -3716,8 +3425,6 @@ define i16 @vreduce_smin_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>) - define i16 @vreduce_smin_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v128i16: ; CHECK: # %bb.0: @@ -3735,8 +3442,6 @@ define i16 @vreduce_smin_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32>) - define i32 @vreduce_smin_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_smin_v1i32: ; CHECK: # %bb.0: @@ -3747,8 +3452,6 @@ define i32 @vreduce_smin_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>) - define i32 @vreduce_smin_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i32: ; CHECK: # %bb.0: @@ -3762,8 +3465,6 @@ define i32 @vreduce_smin_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>) - define i32 @vreduce_smin_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i32: ; CHECK: # %bb.0: @@ -3777,8 +3478,6 @@ define i32 @vreduce_smin_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>) - define i32 @vreduce_smin_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i32: ; CHECK: # %bb.0: @@ -3792,8 +3491,6 @@ define i32 @vreduce_smin_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>) - define i32 @vreduce_smin_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i32: ; CHECK: # %bb.0: @@ -3807,8 +3504,6 @@ define i32 @vreduce_smin_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>) - define i32 @vreduce_smin_v32i32(ptr %x) { ; CHECK-LABEL: 
vreduce_smin_v32i32: ; CHECK: # %bb.0: @@ -3823,8 +3518,6 @@ define i32 @vreduce_smin_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>) - define i32 @vreduce_smin_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i32: ; CHECK: # %bb.0: @@ -3842,8 +3535,6 @@ define i32 @vreduce_smin_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>) - define i64 @vreduce_smin_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_smin_v1i64: ; RV32: # %bb.0: @@ -3863,8 +3554,6 @@ define i64 @vreduce_smin_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>) - define i64 @vreduce_smin_v2i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v2i64: ; RV32: # %bb.0: @@ -3890,8 +3579,6 @@ define i64 @vreduce_smin_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) - define i64 @vreduce_smin_v4i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v4i64: ; RV32: # %bb.0: @@ -3917,8 +3604,6 @@ define i64 @vreduce_smin_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>) - define i64 @vreduce_smin_v8i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v8i64: ; RV32: # %bb.0: @@ -3944,8 +3629,6 @@ define i64 @vreduce_smin_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>) - define i64 @vreduce_smin_v16i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v16i64: ; RV32: # %bb.0: @@ -3971,8 +3654,6 @@ define i64 @vreduce_smin_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v32i64(<32 x i64>) - define i64 @vreduce_smin_v32i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v32i64: ; RV32: # %bb.0: @@ -4004,8 +3685,6 @@ define i64 @vreduce_smin_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v64i64(<64 x i64>) - define i64 @vreduce_smin_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_smin_v64i64: ; RV32: # %bb.0: @@ -4049,8 +3728,6 @@ define i64 @vreduce_smin_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 
@llvm.vector.reduce.smax.v1i8(<1 x i8>) - define i8 @vreduce_smax_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_smax_v1i8: ; CHECK: # %bb.0: @@ -4061,8 +3738,6 @@ define i8 @vreduce_smax_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>) - define i8 @vreduce_smax_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i8: ; CHECK: # %bb.0: @@ -4076,8 +3751,6 @@ define i8 @vreduce_smax_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v3i8(<3 x i8>) - define i8 @vreduce_smax_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v3i8: ; CHECK: # %bb.0: @@ -4093,8 +3766,6 @@ define i8 @vreduce_smax_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>) - define i8 @vreduce_smax_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i8: ; CHECK: # %bb.0: @@ -4108,8 +3779,6 @@ define i8 @vreduce_smax_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>) - define i8 @vreduce_smax_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i8: ; CHECK: # %bb.0: @@ -4123,8 +3792,6 @@ define i8 @vreduce_smax_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>) - define i8 @vreduce_smax_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i8: ; CHECK: # %bb.0: @@ -4138,8 +3805,6 @@ define i8 @vreduce_smax_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>) - define i8 @vreduce_smax_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i8: ; CHECK: # %bb.0: @@ -4154,8 +3819,6 @@ define i8 @vreduce_smax_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>) - define i8 @vreduce_smax_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i8: ; CHECK: # %bb.0: @@ -4170,8 +3833,6 @@ define i8 @vreduce_smax_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>) - define i8 @vreduce_smax_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v128i8: ; CHECK: # %bb.0: @@ -4186,8 +3847,6 @@ define i8 @vreduce_smax_v128i8(ptr %x) { ret i8 
%red } -declare i8 @llvm.vector.reduce.smax.v256i8(<256 x i8>) - define i8 @vreduce_smax_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v256i8: ; CHECK: # %bb.0: @@ -4205,8 +3864,6 @@ define i8 @vreduce_smax_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.smax.v1i16(<1 x i16>) - define i16 @vreduce_smax_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_smax_v1i16: ; CHECK: # %bb.0: @@ -4217,8 +3874,6 @@ define i16 @vreduce_smax_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>) - define i16 @vreduce_smax_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i16: ; CHECK: # %bb.0: @@ -4232,8 +3887,6 @@ define i16 @vreduce_smax_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>) - define i16 @vreduce_smax_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i16: ; CHECK: # %bb.0: @@ -4247,8 +3900,6 @@ define i16 @vreduce_smax_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>) - define i16 @vreduce_smax_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i16: ; CHECK: # %bb.0: @@ -4262,8 +3913,6 @@ define i16 @vreduce_smax_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>) - define i16 @vreduce_smax_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i16: ; CHECK: # %bb.0: @@ -4277,8 +3926,6 @@ define i16 @vreduce_smax_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>) - define i16 @vreduce_smax_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i16: ; CHECK: # %bb.0: @@ -4293,8 +3940,6 @@ define i16 @vreduce_smax_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>) - define i16 @vreduce_smax_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i16: ; CHECK: # %bb.0: @@ -4309,8 +3954,6 @@ define i16 @vreduce_smax_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>) - define i16 @vreduce_smax_v128i16(ptr %x) { ; CHECK-LABEL: 
vreduce_smax_v128i16: ; CHECK: # %bb.0: @@ -4328,8 +3971,6 @@ define i16 @vreduce_smax_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32>) - define i32 @vreduce_smax_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_smax_v1i32: ; CHECK: # %bb.0: @@ -4340,8 +3981,6 @@ define i32 @vreduce_smax_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>) - define i32 @vreduce_smax_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i32: ; CHECK: # %bb.0: @@ -4355,8 +3994,6 @@ define i32 @vreduce_smax_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>) - define i32 @vreduce_smax_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i32: ; CHECK: # %bb.0: @@ -4370,8 +4007,6 @@ define i32 @vreduce_smax_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>) - define i32 @vreduce_smax_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i32: ; CHECK: # %bb.0: @@ -4385,8 +4020,6 @@ define i32 @vreduce_smax_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>) - define i32 @vreduce_smax_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i32: ; CHECK: # %bb.0: @@ -4400,8 +4033,6 @@ define i32 @vreduce_smax_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>) - define i32 @vreduce_smax_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i32: ; CHECK: # %bb.0: @@ -4416,8 +4047,6 @@ define i32 @vreduce_smax_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>) - define i32 @vreduce_smax_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i32: ; CHECK: # %bb.0: @@ -4435,8 +4064,6 @@ define i32 @vreduce_smax_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>) - define i64 @vreduce_smax_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_smax_v1i64: ; RV32: # %bb.0: @@ -4456,8 +4083,6 @@ define i64 @vreduce_smax_v1i64(<1 x i64> %v) { ret i64 %red } -declare 
i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>) - define i64 @vreduce_smax_v2i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v2i64: ; RV32: # %bb.0: @@ -4483,8 +4108,6 @@ define i64 @vreduce_smax_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) - define i64 @vreduce_smax_v4i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v4i64: ; RV32: # %bb.0: @@ -4510,8 +4133,6 @@ define i64 @vreduce_smax_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>) - define i64 @vreduce_smax_v8i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v8i64: ; RV32: # %bb.0: @@ -4537,8 +4158,6 @@ define i64 @vreduce_smax_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>) - define i64 @vreduce_smax_v16i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v16i64: ; RV32: # %bb.0: @@ -4564,8 +4183,6 @@ define i64 @vreduce_smax_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v32i64(<32 x i64>) - define i64 @vreduce_smax_v32i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v32i64: ; RV32: # %bb.0: @@ -4597,8 +4214,6 @@ define i64 @vreduce_smax_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v64i64(<64 x i64>) - define i64 @vreduce_smax_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_smax_v64i64: ; RV32: # %bb.0: @@ -4642,8 +4257,6 @@ define i64 @vreduce_smax_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.umin.v1i8(<1 x i8>) - define i8 @vreduce_umin_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_umin_v1i8: ; CHECK: # %bb.0: @@ -4654,8 +4267,6 @@ define i8 @vreduce_umin_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>) - define i8 @vreduce_umin_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v2i8: ; CHECK: # %bb.0: @@ -4669,8 +4280,6 @@ define i8 @vreduce_umin_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v3i8(<3 x i8>) - define i8 @vreduce_umin_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v3i8: ; CHECK: # %bb.0: @@ -4686,8 +4295,6 
@@ define i8 @vreduce_umin_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>) - define i8 @vreduce_umin_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i8: ; CHECK: # %bb.0: @@ -4701,8 +4308,6 @@ define i8 @vreduce_umin_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>) - define i8 @vreduce_umin_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i8: ; CHECK: # %bb.0: @@ -4716,8 +4321,6 @@ define i8 @vreduce_umin_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>) - define i8 @vreduce_umin_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i8: ; CHECK: # %bb.0: @@ -4731,8 +4334,6 @@ define i8 @vreduce_umin_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>) - define i8 @vreduce_umin_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i8: ; CHECK: # %bb.0: @@ -4747,8 +4348,6 @@ define i8 @vreduce_umin_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>) - define i8 @vreduce_umin_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i8: ; CHECK: # %bb.0: @@ -4763,8 +4362,6 @@ define i8 @vreduce_umin_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>) - define i8 @vreduce_umin_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v128i8: ; CHECK: # %bb.0: @@ -4779,8 +4376,6 @@ define i8 @vreduce_umin_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v256i8(<256 x i8>) - define i8 @vreduce_umin_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v256i8: ; CHECK: # %bb.0: @@ -4798,8 +4393,6 @@ define i8 @vreduce_umin_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.umin.v1i16(<1 x i16>) - define i16 @vreduce_umin_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_umin_v1i16: ; CHECK: # %bb.0: @@ -4810,8 +4403,6 @@ define i16 @vreduce_umin_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>) - define i16 @vreduce_umin_v2i16(ptr %x) { ; CHECK-LABEL: 
vreduce_umin_v2i16: ; CHECK: # %bb.0: @@ -4825,8 +4416,6 @@ define i16 @vreduce_umin_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>) - define i16 @vreduce_umin_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i16: ; CHECK: # %bb.0: @@ -4840,8 +4429,6 @@ define i16 @vreduce_umin_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>) - define i16 @vreduce_umin_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i16: ; CHECK: # %bb.0: @@ -4855,8 +4442,6 @@ define i16 @vreduce_umin_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>) - define i16 @vreduce_umin_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i16: ; CHECK: # %bb.0: @@ -4870,8 +4455,6 @@ define i16 @vreduce_umin_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>) - define i16 @vreduce_umin_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i16: ; CHECK: # %bb.0: @@ -4886,8 +4469,6 @@ define i16 @vreduce_umin_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>) - define i16 @vreduce_umin_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i16: ; CHECK: # %bb.0: @@ -4902,8 +4483,6 @@ define i16 @vreduce_umin_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>) - define i16 @vreduce_umin_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v128i16: ; CHECK: # %bb.0: @@ -4921,8 +4500,6 @@ define i16 @vreduce_umin_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32>) - define i32 @vreduce_umin_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_umin_v1i32: ; CHECK: # %bb.0: @@ -4933,8 +4510,6 @@ define i32 @vreduce_umin_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>) - define i32 @vreduce_umin_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v2i32: ; CHECK: # %bb.0: @@ -4948,8 +4523,6 @@ define i32 @vreduce_umin_v2i32(ptr %x) { ret i32 %red } -declare i32 
@llvm.vector.reduce.umin.v4i32(<4 x i32>) - define i32 @vreduce_umin_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i32: ; CHECK: # %bb.0: @@ -4963,8 +4536,6 @@ define i32 @vreduce_umin_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>) - define i32 @vreduce_umin_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i32: ; CHECK: # %bb.0: @@ -4978,8 +4549,6 @@ define i32 @vreduce_umin_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>) - define i32 @vreduce_umin_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i32: ; CHECK: # %bb.0: @@ -4993,8 +4562,6 @@ define i32 @vreduce_umin_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>) - define i32 @vreduce_umin_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i32: ; CHECK: # %bb.0: @@ -5009,8 +4576,6 @@ define i32 @vreduce_umin_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>) - define i32 @vreduce_umin_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i32: ; CHECK: # %bb.0: @@ -5028,8 +4593,6 @@ define i32 @vreduce_umin_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>) - define i64 @vreduce_umin_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_umin_v1i64: ; RV32: # %bb.0: @@ -5049,8 +4612,6 @@ define i64 @vreduce_umin_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>) - define i64 @vreduce_umin_v2i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v2i64: ; RV32: # %bb.0: @@ -5076,8 +4637,6 @@ define i64 @vreduce_umin_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) - define i64 @vreduce_umin_v4i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v4i64: ; RV32: # %bb.0: @@ -5103,8 +4662,6 @@ define i64 @vreduce_umin_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>) - define i64 @vreduce_umin_v8i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v8i64: ; RV32: # %bb.0: @@ -5130,8 
+4687,6 @@ define i64 @vreduce_umin_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>) - define i64 @vreduce_umin_v16i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v16i64: ; RV32: # %bb.0: @@ -5157,8 +4712,6 @@ define i64 @vreduce_umin_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v32i64(<32 x i64>) - define i64 @vreduce_umin_v32i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v32i64: ; RV32: # %bb.0: @@ -5190,8 +4743,6 @@ define i64 @vreduce_umin_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v64i64(<64 x i64>) - define i64 @vreduce_umin_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_umin_v64i64: ; RV32: # %bb.0: @@ -5235,8 +4786,6 @@ define i64 @vreduce_umin_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8>) - define i8 @vreduce_umax_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_umax_v1i8: ; CHECK: # %bb.0: @@ -5247,8 +4796,6 @@ define i8 @vreduce_umax_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>) - define i8 @vreduce_umax_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i8: ; CHECK: # %bb.0: @@ -5262,8 +4809,6 @@ define i8 @vreduce_umax_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v3i8(<3 x i8>) - define i8 @vreduce_umax_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v3i8: ; CHECK: # %bb.0: @@ -5278,8 +4823,6 @@ define i8 @vreduce_umax_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>) - define i8 @vreduce_umax_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i8: ; CHECK: # %bb.0: @@ -5293,8 +4836,6 @@ define i8 @vreduce_umax_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>) - define i8 @vreduce_umax_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i8: ; CHECK: # %bb.0: @@ -5308,8 +4849,6 @@ define i8 @vreduce_umax_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>) - define i8 @vreduce_umax_v16i8(ptr %x) { ; 
CHECK-LABEL: vreduce_umax_v16i8: ; CHECK: # %bb.0: @@ -5323,8 +4862,6 @@ define i8 @vreduce_umax_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>) - define i8 @vreduce_umax_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i8: ; CHECK: # %bb.0: @@ -5339,8 +4876,6 @@ define i8 @vreduce_umax_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>) - define i8 @vreduce_umax_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i8: ; CHECK: # %bb.0: @@ -5355,8 +4890,6 @@ define i8 @vreduce_umax_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>) - define i8 @vreduce_umax_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v128i8: ; CHECK: # %bb.0: @@ -5371,8 +4904,6 @@ define i8 @vreduce_umax_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v256i8(<256 x i8>) - define i8 @vreduce_umax_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v256i8: ; CHECK: # %bb.0: @@ -5390,8 +4921,6 @@ define i8 @vreduce_umax_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.umax.v1i16(<1 x i16>) - define i16 @vreduce_umax_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_umax_v1i16: ; CHECK: # %bb.0: @@ -5402,8 +4931,6 @@ define i16 @vreduce_umax_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>) - define i16 @vreduce_umax_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i16: ; CHECK: # %bb.0: @@ -5417,8 +4944,6 @@ define i16 @vreduce_umax_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>) - define i16 @vreduce_umax_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i16: ; CHECK: # %bb.0: @@ -5432,8 +4957,6 @@ define i16 @vreduce_umax_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>) - define i16 @vreduce_umax_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i16: ; CHECK: # %bb.0: @@ -5447,8 +4970,6 @@ define i16 @vreduce_umax_v8i16(ptr %x) { ret i16 %red } -declare i16 
@llvm.vector.reduce.umax.v16i16(<16 x i16>) - define i16 @vreduce_umax_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v16i16: ; CHECK: # %bb.0: @@ -5462,8 +4983,6 @@ define i16 @vreduce_umax_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>) - define i16 @vreduce_umax_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i16: ; CHECK: # %bb.0: @@ -5478,8 +4997,6 @@ define i16 @vreduce_umax_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>) - define i16 @vreduce_umax_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i16: ; CHECK: # %bb.0: @@ -5494,8 +5011,6 @@ define i16 @vreduce_umax_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>) - define i16 @vreduce_umax_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v128i16: ; CHECK: # %bb.0: @@ -5513,8 +5028,6 @@ define i16 @vreduce_umax_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32>) - define i32 @vreduce_umax_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_umax_v1i32: ; CHECK: # %bb.0: @@ -5525,8 +5038,6 @@ define i32 @vreduce_umax_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>) - define i32 @vreduce_umax_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i32: ; CHECK: # %bb.0: @@ -5540,8 +5051,6 @@ define i32 @vreduce_umax_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>) - define i32 @vreduce_umax_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i32: ; CHECK: # %bb.0: @@ -5555,8 +5064,6 @@ define i32 @vreduce_umax_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>) - define i32 @vreduce_umax_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i32: ; CHECK: # %bb.0: @@ -5570,8 +5077,6 @@ define i32 @vreduce_umax_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>) - define i32 @vreduce_umax_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v16i32: ; 
CHECK: # %bb.0: @@ -5585,8 +5090,6 @@ define i32 @vreduce_umax_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>) - define i32 @vreduce_umax_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i32: ; CHECK: # %bb.0: @@ -5601,8 +5104,6 @@ define i32 @vreduce_umax_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>) - define i32 @vreduce_umax_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i32: ; CHECK: # %bb.0: @@ -5620,8 +5121,6 @@ define i32 @vreduce_umax_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>) - define i64 @vreduce_umax_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_umax_v1i64: ; RV32: # %bb.0: @@ -5641,8 +5140,6 @@ define i64 @vreduce_umax_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>) - define i64 @vreduce_umax_v2i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v2i64: ; RV32: # %bb.0: @@ -5668,8 +5165,6 @@ define i64 @vreduce_umax_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) - define i64 @vreduce_umax_v4i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v4i64: ; RV32: # %bb.0: @@ -5695,8 +5190,6 @@ define i64 @vreduce_umax_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>) - define i64 @vreduce_umax_v8i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v8i64: ; RV32: # %bb.0: @@ -5722,8 +5215,6 @@ define i64 @vreduce_umax_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>) - define i64 @vreduce_umax_v16i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v16i64: ; RV32: # %bb.0: @@ -5749,8 +5240,6 @@ define i64 @vreduce_umax_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v32i64(<32 x i64>) - define i64 @vreduce_umax_v32i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v32i64: ; RV32: # %bb.0: @@ -5782,8 +5271,6 @@ define i64 @vreduce_umax_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v64i64(<64 x 
i64>) - define i64 @vreduce_umax_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_umax_v64i64: ; RV32: # %bb.0: @@ -5827,8 +5314,6 @@ define i64 @vreduce_umax_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.mul.v1i8(<1 x i8>) - define i8 @vreduce_mul_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_mul_v1i8: ; CHECK: # %bb.0: @@ -5839,8 +5324,6 @@ define i8 @vreduce_mul_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>) - define i8 @vreduce_mul_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i8: ; CHECK: # %bb.0: @@ -5855,8 +5338,6 @@ define i8 @vreduce_mul_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>) - define i8 @vreduce_mul_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v3i8: ; CHECK: # %bb.0: @@ -5879,8 +5360,6 @@ define i8 @vreduce_mul_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>) - define i8 @vreduce_mul_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i8: ; CHECK: # %bb.0: @@ -5897,8 +5376,6 @@ define i8 @vreduce_mul_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>) - define i8 @vreduce_mul_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i8: ; CHECK: # %bb.0: @@ -5917,8 +5394,6 @@ define i8 @vreduce_mul_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>) - define i8 @vreduce_mul_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i8: ; CHECK: # %bb.0: @@ -5939,8 +5414,6 @@ define i8 @vreduce_mul_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>) - define i8 @vreduce_mul_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i8: ; CHECK: # %bb.0: @@ -5964,8 +5437,6 @@ define i8 @vreduce_mul_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v64i8(<64 x i8>) - define i8 @vreduce_mul_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i8: ; CHECK: # %bb.0: @@ -5992,8 +5463,6 @@ define i8 @vreduce_mul_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v128i8(<128 
x i8>) - define i8 @vreduce_mul_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v128i8: ; CHECK: # %bb.0: @@ -6023,8 +5492,6 @@ define i8 @vreduce_mul_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v256i8(<256 x i8>) - define i8 @vreduce_mul_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v256i8: ; CHECK: # %bb.0: @@ -6057,8 +5524,6 @@ define i8 @vreduce_mul_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.mul.v1i16(<1 x i16>) - define i16 @vreduce_mul_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_mul_v1i16: ; CHECK: # %bb.0: @@ -6069,8 +5534,6 @@ define i16 @vreduce_mul_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>) - define i16 @vreduce_mul_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i16: ; CHECK: # %bb.0: @@ -6085,8 +5548,6 @@ define i16 @vreduce_mul_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>) - define i16 @vreduce_mul_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i16: ; CHECK: # %bb.0: @@ -6103,8 +5564,6 @@ define i16 @vreduce_mul_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>) - define i16 @vreduce_mul_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i16: ; CHECK: # %bb.0: @@ -6123,8 +5582,6 @@ define i16 @vreduce_mul_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>) - define i16 @vreduce_mul_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i16: ; CHECK: # %bb.0: @@ -6145,8 +5602,6 @@ define i16 @vreduce_mul_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v32i16(<32 x i16>) - define i16 @vreduce_mul_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i16: ; CHECK: # %bb.0: @@ -6170,8 +5625,6 @@ define i16 @vreduce_mul_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v64i16(<64 x i16>) - define i16 @vreduce_mul_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i16: ; CHECK: # %bb.0: @@ -6198,8 +5651,6 @@ define i16 @vreduce_mul_v64i16(ptr %x) { ret i16 
%red } -declare i16 @llvm.vector.reduce.mul.v128i16(<128 x i16>) - define i16 @vreduce_mul_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v128i16: ; CHECK: # %bb.0: @@ -6229,8 +5680,6 @@ define i16 @vreduce_mul_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32>) - define i32 @vreduce_mul_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_mul_v1i32: ; CHECK: # %bb.0: @@ -6241,8 +5690,6 @@ define i32 @vreduce_mul_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>) - define i32 @vreduce_mul_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i32: ; CHECK: # %bb.0: @@ -6257,8 +5704,6 @@ define i32 @vreduce_mul_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>) - define i32 @vreduce_mul_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i32: ; CHECK: # %bb.0: @@ -6275,8 +5720,6 @@ define i32 @vreduce_mul_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>) - define i32 @vreduce_mul_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i32: ; CHECK: # %bb.0: @@ -6295,8 +5738,6 @@ define i32 @vreduce_mul_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>) - define i32 @vreduce_mul_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i32: ; CHECK: # %bb.0: @@ -6317,8 +5758,6 @@ define i32 @vreduce_mul_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v32i32(<32 x i32>) - define i32 @vreduce_mul_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i32: ; CHECK: # %bb.0: @@ -6342,8 +5781,6 @@ define i32 @vreduce_mul_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v64i32(<64 x i32>) - define i32 @vreduce_mul_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i32: ; CHECK: # %bb.0: @@ -6370,8 +5807,6 @@ define i32 @vreduce_mul_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>) - define i64 @vreduce_mul_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_mul_v1i64: ; RV32: # %bb.0: @@ 
-6391,8 +5826,6 @@ define i64 @vreduce_mul_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>) - define i64 @vreduce_mul_v2i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v2i64: ; RV32: # %bb.0: @@ -6421,8 +5854,6 @@ define i64 @vreduce_mul_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>) - define i64 @vreduce_mul_v4i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v4i64: ; RV32: # %bb.0: @@ -6454,8 +5885,6 @@ define i64 @vreduce_mul_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>) - define i64 @vreduce_mul_v8i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v8i64: ; RV32: # %bb.0: @@ -6491,8 +5920,6 @@ define i64 @vreduce_mul_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>) - define i64 @vreduce_mul_v16i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v16i64: ; RV32: # %bb.0: @@ -6532,8 +5959,6 @@ define i64 @vreduce_mul_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v32i64(<32 x i64>) - define i64 @vreduce_mul_v32i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v32i64: ; RV32: # %bb.0: @@ -6578,8 +6003,6 @@ define i64 @vreduce_mul_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v64i64(<64 x i64>) - define i64 @vreduce_mul_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_mul_v64i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll index 276f6b077931b..8523ca957a8f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vp.reduce.and.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_and_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v1i1: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define zeroext i1 @vpreduce_and_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v1i1: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v1i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_and_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i1: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define zeroext i1 @vpreduce_and_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i1: ; CHECK: # %bb.0: @@ -82,8 +72,6 @@ define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i1: ; CHECK: # %bb.0: @@ -98,8 +86,6 @@ define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_and_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vpreduce_and_v4i1: ; CHECK: # %bb.0: @@ -114,8 +100,6 @@ define zeroext i1 @vpreduce_and_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i1: ; CHECK: # %bb.0: @@ -130,8 +114,6 @@ define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i1: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_and_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v8i1: ; CHECK: # %bb.0: @@ -162,8 +142,6 @@ define zeroext i1 @vpreduce_and_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v8i1: ; CHECK: # %bb.0: @@ -178,8 +156,6 @@ define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v8i1: ; CHECK: # %bb.0: @@ -194,8 +170,6 @@ define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v10i1(i1, <10 x i1>, <10 x i1>, i32) - define zeroext i1 @vpreduce_and_v10i1(i1 zeroext %s, <10 x i1> %v, <10 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_and_v10i1: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define zeroext i1 @vpreduce_and_v10i1(i1 zeroext %s, <10 x i1> %v, <10 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.and.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_and_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v16i1: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define zeroext i1 @vpreduce_and_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32) - define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v256i1: ; CHECK: # %bb.0: @@ -261,8 +231,6 @@ define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> ret i1 %r } -declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v16i1: ; CHECK: # %bb.0: @@ -277,8 +245,6 @@ define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v16i1: ; CHECK: # %bb.0: @@ -293,8 +259,6 @@ define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v1i1: ; CHECK: # %bb.0: @@ -309,8 +273,6 @@ define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vpreduce_add_v2i1: ; CHECK: # %bb.0: @@ -325,8 +287,6 @@ define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i1: ; CHECK: # %bb.0: @@ -341,8 +301,6 @@ define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v8i1: ; CHECK: # %bb.0: @@ -357,8 +315,6 @@ define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v16i1: ; CHECK: # %bb.0: @@ -373,8 +329,6 @@ define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_smax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v1i1: ; CHECK: # %bb.0: @@ -389,8 +343,6 @@ define zeroext i1 @vpreduce_smax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_smax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i1: ; CHECK: # %bb.0: @@ -405,8 +357,6 @@ define zeroext i1 @vpreduce_smax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_smax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpreduce_smax_v4i1: ; CHECK: # %bb.0: @@ -421,8 +371,6 @@ define zeroext i1 @vpreduce_smax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_smax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v8i1: ; CHECK: # %bb.0: @@ -437,8 +385,6 @@ define zeroext i1 @vpreduce_smax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_smax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v16i1: ; CHECK: # %bb.0: @@ -453,8 +399,6 @@ define zeroext i1 @vpreduce_smax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_smax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v32i1: ; CHECK: # %bb.0: @@ -469,8 +413,6 @@ define zeroext i1 @vpreduce_smax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_smax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v64i1: ; CHECK: # %bb.0: @@ -485,8 +427,6 @@ define zeroext i1 @vpreduce_smax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v1i1: ; CHECK: # %bb.0: @@ -501,8 +441,6 @@ define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, 
<2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i1: ; CHECK: # %bb.0: @@ -517,8 +455,6 @@ define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i1: ; CHECK: # %bb.0: @@ -533,8 +469,6 @@ define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v8i1: ; CHECK: # %bb.0: @@ -549,8 +483,6 @@ define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v16i1: ; CHECK: # %bb.0: @@ -565,8 +497,6 @@ define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v32i1: ; CHECK: # %bb.0: @@ -581,8 +511,6 @@ define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v64i1: ; CHECK: # %bb.0: @@ -597,8 +525,6 @@ define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_umax_v1i1(i1 
zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v1i1: ; CHECK: # %bb.0: @@ -613,8 +539,6 @@ define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i1: ; CHECK: # %bb.0: @@ -629,8 +553,6 @@ define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i1: ; CHECK: # %bb.0: @@ -645,8 +567,6 @@ define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v8i1: ; CHECK: # %bb.0: @@ -661,8 +581,6 @@ define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v16i1: ; CHECK: # %bb.0: @@ -677,8 +595,6 @@ define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v32i1: ; CHECK: # %bb.0: @@ -693,8 +609,6 @@ define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 
@vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v64i1: ; CHECK: # %bb.0: @@ -709,8 +623,6 @@ define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_umin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v1i1: ; CHECK: # %bb.0: @@ -725,8 +637,6 @@ define zeroext i1 @vpreduce_umin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_umin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i1: ; CHECK: # %bb.0: @@ -741,8 +651,6 @@ define zeroext i1 @vpreduce_umin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_umin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i1: ; CHECK: # %bb.0: @@ -757,8 +665,6 @@ define zeroext i1 @vpreduce_umin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_umin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v8i1: ; CHECK: # %bb.0: @@ -773,8 +679,6 @@ define zeroext i1 @vpreduce_umin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_umin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v16i1: ; CHECK: # %bb.0: @@ -789,8 +693,6 @@ define zeroext i1 @vpreduce_umin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v32i1(i1, <32 x i1>, <32 x i1>, i32) - 
define zeroext i1 @vpreduce_umin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v32i1: ; CHECK: # %bb.0: @@ -805,8 +707,6 @@ define zeroext i1 @vpreduce_umin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_umin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v64i1: ; CHECK: # %bb.0: @@ -821,8 +721,6 @@ define zeroext i1 @vpreduce_umin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v1i1: ; CHECK: # %bb.0: @@ -837,8 +735,6 @@ define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_mul_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v2i1: ; CHECK: # %bb.0: @@ -853,8 +749,6 @@ define zeroext i1 @vpreduce_mul_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_mul_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v4i1: ; CHECK: # %bb.0: @@ -869,8 +763,6 @@ define zeroext i1 @vpreduce_mul_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_mul_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v8i1: ; CHECK: # %bb.0: @@ -885,8 +777,6 @@ define zeroext i1 @vpreduce_mul_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define 
zeroext i1 @vpreduce_mul_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v16i1: ; CHECK: # %bb.0: @@ -901,8 +791,6 @@ define zeroext i1 @vpreduce_mul_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_mul_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v32i1: ; CHECK: # %bb.0: @@ -917,8 +805,6 @@ define zeroext i1 @vpreduce_mul_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_mul_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll index 97cf7e6902e32..7540495c0d3b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.vp.rint.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.rint.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f16: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.rint.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_rint_v8f16(<8 
x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f16: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f16: ; CHECK: # %bb.0: @@ -166,8 +158,6 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ret <16 x half> %v } -declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f32: ; CHECK: # %bb.0: @@ -204,8 +194,6 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.rint.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f32: ; CHECK: # %bb.0: @@ -242,8 +230,6 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f32: ; CHECK: # %bb.0: @@ -282,8 +268,6 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f32: ; CHECK: # %bb.0: @@ -322,8 +306,6 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) ret <16 x float> %v } -declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext 
%evl) { ; RV32-LABEL: vp_rint_v2f64: ; RV32: # %bb.0: @@ -390,8 +372,6 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v4f64: ; RV32: # %bb.0: @@ -462,8 +442,6 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v8f64: ; RV32: # %bb.0: @@ -534,8 +512,6 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v15f64: ; RV32: # %bb.0: @@ -606,8 +582,6 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev ret <15 x double> %v } -declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v16f64: ; RV32: # %bb.0: @@ -678,8 +652,6 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev ret <16 x double> %v } -declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll index 16c8b2b9da682..de5427f329496 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.round.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.round.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.round.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.round.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_round_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.round.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_round_v15f64(<15 x double> %va, 
<15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll index 14c550d555cf7..1c923e3f12171 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.roundeven.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) ret <2 x half> %v } -declare <4 x half> @llvm.vp.roundeven.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <8 x half> @llvm.vp.roundeven.v8f16(<8 x half>, <8 x 
i1>, i32) - define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) ret <8 x half> %v } -declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e ret <16 x half> %v } -declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.roundeven.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev ret <4 x float> %v } -declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev ret <8 x float> %v } -declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext ret <16 x float> %v } -declare <2 x 
double> @llvm.vp.roundeven.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext % ret <2 x double> %v } -declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext % ret <4 x double> %v } -declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext % ret <8 x double> %v } -declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex ret <15 x double> %v } -declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex ret <16 x double> %v } -declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v32f64: ; RV32ZVFH: # 
%bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll index 16f04f14721d0..83cbd2b760341 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev ret <2 x half> %v } -declare <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev ret <4 x half> %v } -declare <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev ret <8 x half> %v } -declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext ret <16 x half> %v } -declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext % ret <2 x float> %v } -declare <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext % ret <4 x float> %v } -declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext % ret <8 x float> %v } -declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex ret <16 x float> %v } -declare <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext ret <2 x double> %v } -declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext ret <4 x double> %v } -declare <8 x double> 
@llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext ret <8 x double> %v } -declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero ret <15 x double> %v } -declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero ret <16 x double> %v } -declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll index c0a213034c95b..71c32f1473b7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll @@ -178,12 +178,3 @@ entry: ret i32 %op.rdx.3 } -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) -declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) -declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1) -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) - -declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1) -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, 
i1) -declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll index ba64655947602..af3e9db9fe123 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -11,8 +11,6 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64 -declare <7 x i1> @llvm.vp.fcmp.v7f16(<7 x half>, <7 x half>, metadata, <7 x i1>, i32) - define <7 x i1> @fcmp_oeq_vv_v7f16(<7 x half> %va, <7 x half> %vb, <7 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v7f16: ; ZVFH: # %bb.0: @@ -33,8 +31,6 @@ define <7 x i1> @fcmp_oeq_vv_v7f16(<7 x half> %va, <7 x half> %vb, <7 x i1> %m, ret <7 x i1> %v } -declare <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half>, <8 x half>, metadata, <8 x i1>, i32) - define <8 x i1> @fcmp_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v8f16: ; ZVFH: # %bb.0: @@ -1055,8 +1051,6 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3 ret <8 x i1> %v } -declare <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half>, <128 x half>, metadata, <128 x i1>, i32) - define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v128f16: ; ZVFH: # %bb.0: @@ -3361,8 +3355,6 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128 ret <128 x i1> %v } -declare <7 x i1> @llvm.vp.fcmp.v7f64(<7 x double>, <7 x double>, metadata, <7 x i1>, i32) - define <7 x i1> @fcmp_oeq_vv_v7f64(<7 x double> %va, <7 x double> %vb, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v7f64: ; CHECK: # %bb.0: @@ -3374,8 +3366,6 @@ define <7 x i1> @fcmp_oeq_vv_v7f64(<7 x double> %va, <7 x double> %vb, <7 x i1> ret <7 x i1> %v } -declare <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double>, <8 x 
double>, metadata, <8 x i1>, i32) - define <8 x i1> @fcmp_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v8f64: ; CHECK: # %bb.0: @@ -3914,8 +3904,6 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m ret <8 x i1> %v } -declare <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double>, <32 x double>, metadata, <32 x i1>, i32) - define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll index 456170b086463..36847c971d858 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1>, <2 x i1>, metadata, <2 x i1>, i32) - define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 ze ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1>, <4 x i1>, metadata, <4 x i1>, i32) - define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 ze ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1>, <8 x i1>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define <8 x i1> @icmp_eq_vv_v8i1(<8 x 
i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 ze ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1>, <16 x i1>, metadata, <16 x i1>, i32) - define <16 x i1> @icmp_eq_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v16i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll index ad57a6037652f..efc0f7ef4a441 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -7,8 +7,6 @@ ; FIXME: We're missing canonicalizations of ISD::VP_SETCC equivalent to those ; for ISD::SETCC, e.g., splats aren't moved to the RHS. -declare <8 x i1> @llvm.vp.icmp.v8i7(<8 x i7>, <8 x i7>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i7: ; CHECK: # %bb.0: @@ -57,8 +55,6 @@ define <8 x i1> @icmp_eq_vx_swap_v8i7(<8 x i7> %va, i7 %b, <8 x i1> %m, i32 zero ret <8 x i1> %v } -declare <5 x i1> @llvm.vp.icmp.v5i8(<5 x i8>, <5 x i8>, metadata, <5 x i1>, i32) - define <5 x i1> @icmp_eq_vv_v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v5i8: ; CHECK: # %bb.0: @@ -93,8 +89,6 @@ define <5 x i1> @icmp_eq_vx_swap_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zero ret <5 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8>, <8 x i8>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i8: ; CHECK: # %bb.0: @@ -587,8 +581,6 @@ define <8 x i1> @icmp_sle_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e ret <8 x i1> %v } -declare <256 x i1> @llvm.vp.icmp.v256i8(<256 x i8>, <256 x i8>, metadata, <256 x i1>, i32) - define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: icmp_eq_vv_v256i8: ; CHECK: # %bb.0: @@ -696,8 +688,6 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, ret <256 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i32: ; CHECK: # %bb.0: @@ -1235,8 +1225,6 @@ define <8 x i1> @icmp_sle_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext ret <8 x i1> %v } -declare <64 x i1> @llvm.vp.icmp.v64i32(<64 x i32>, <64 x i32>, metadata, <64 x i1>, i32) - define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v64i32: ; CHECK: # %bb.0: @@ -1345,8 +1333,6 @@ define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i ret <64 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64>, <8 x i64>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll index bd9b66997ff8d..78bc2a2ebdaef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.sext.v4i16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i1: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vsext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i1(<4 x 
i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i1: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vsext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll index d8dc1f3588633..a452e5a9ffbb8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.sext.v4i16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i8: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vsext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i8: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vsext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i8: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define <4 x i64> @vsext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i16: ; CHECK: # %bb.0: @@ 
-98,8 +90,6 @@ define <4 x i32> @vsext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i16: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <4 x i64> @vsext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <4 x i64> @vsext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) - define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v32i64_v32i32: ; CHECK: # %bb.0: @@ -202,8 +188,6 @@ define <32 x i64> @vsext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl ret <32 x i64> %v } -declare <4 x i16> @llvm.vp.sext.v4i16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i7: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define <4 x i16> @vsext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i8> @llvm.vp.sext.v4i8.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i8> @vsext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i8_v4i7: ; CHECK: # %bb.0: @@ -229,8 +211,6 @@ define <4 x i8> @vsext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i15> @llvm.vp.sext.v4i15.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i15> @vsext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i15_v4i8: ; CHECK: # %bb.0: @@ -242,8 +222,6 @@ define <4 x i15> 
@vsext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i15> %v } -declare <4 x i15> @llvm.vp.sext.v4i15.v4i9(<4 x i9>, <4 x i1>, i32) - define <4 x i15> @vsext_v4i15_v4i9(<4 x i9> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i15_v4i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll index a1390a8b1c0de..8d6e9a2dee0a4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh < %s | FileCheck %s -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i1: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i1: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll index 9f1f98893d04c..afa8f2fda2ed4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s 
\ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i7: ; ZVFH: # %bb.0: @@ -35,8 +33,6 @@ define <4 x half> @vsitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i8: ; ZVFH: # %bb.0: @@ -77,8 +73,6 @@ define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i16: ; ZVFH: # %bb.0: @@ -115,8 +109,6 @@ define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i32: ; ZVFH: # %bb.0: @@ -155,8 +147,6 @@ define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i64: ; ZVFH: # %bb.0: @@ -197,8 +187,6 @@ define <4 x half> @vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i8: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> 
%va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i16: ; CHECK: # %bb.0: @@ -245,8 +231,6 @@ define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i32: ; CHECK: # %bb.0: @@ -267,8 +251,6 @@ define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i64: ; CHECK: # %bb.0: @@ -291,8 +273,6 @@ define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i8: ; CHECK: # %bb.0: @@ -315,8 +295,6 @@ define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i16: ; CHECK: # %bb.0: @@ -339,8 +317,6 @@ define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i32: ; CHECK: # %bb.0: @@ -363,8 +339,6 @@ define 
<4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i64: ; CHECK: # %bb.0: @@ -385,8 +359,6 @@ define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev ret <4 x double> %v } -declare <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v32f64_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll index 9812e9832856d..7032ed925d29f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.stepvector.v2i8() - define <2 x i8> @stepvector_v2i8() { ; CHECK-LABEL: stepvector_v2i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <2 x i8> @stepvector_v2i8() { ret <2 x i8> %v } -declare <3 x i8> @llvm.stepvector.v3i8() - define <3 x i8> @stepvector_v3i8() { ; CHECK-LABEL: stepvector_v3i8: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <3 x i8> @stepvector_v3i8() { ret <3 x i8> %v } -declare <4 x i8> @llvm.stepvector.v4i8() - define <4 x i8> @stepvector_v4i8() { ; CHECK-LABEL: stepvector_v4i8: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define <4 x i8> @stepvector_v4i8() { ret <4 x i8> %v } -declare <8 x i8> @llvm.stepvector.v8i8() - define <8 x i8> @stepvector_v8i8() { ; CHECK-LABEL: stepvector_v8i8: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define <8 x i8> @stepvector_v8i8() { ret <8 x i8> %v } -declare <16 x i8> 
@llvm.stepvector.v16i8() - define <16 x i8> @stepvector_v16i8() { ; CHECK-LABEL: stepvector_v16i8: ; CHECK: # %bb.0: @@ -62,8 +52,6 @@ define <16 x i8> @stepvector_v16i8() { ret <16 x i8> %v } -declare <2 x i16> @llvm.stepvector.v2i16() - define <2 x i16> @stepvector_v2i16() { ; CHECK-LABEL: stepvector_v2i16: ; CHECK: # %bb.0: @@ -74,8 +62,6 @@ define <2 x i16> @stepvector_v2i16() { ret <2 x i16> %v } -declare <4 x i16> @llvm.stepvector.v4i16() - define <4 x i16> @stepvector_v4i16() { ; CHECK-LABEL: stepvector_v4i16: ; CHECK: # %bb.0: @@ -86,8 +72,6 @@ define <4 x i16> @stepvector_v4i16() { ret <4 x i16> %v } -declare <8 x i16> @llvm.stepvector.v8i16() - define <8 x i16> @stepvector_v8i16() { ; CHECK-LABEL: stepvector_v8i16: ; CHECK: # %bb.0: @@ -98,8 +82,6 @@ define <8 x i16> @stepvector_v8i16() { ret <8 x i16> %v } -declare <16 x i16> @llvm.stepvector.v16i16() - define <16 x i16> @stepvector_v16i16() { ; CHECK-LABEL: stepvector_v16i16: ; CHECK: # %bb.0: @@ -110,8 +92,6 @@ define <16 x i16> @stepvector_v16i16() { ret <16 x i16> %v } -declare <2 x i32> @llvm.stepvector.v2i32() - define <2 x i32> @stepvector_v2i32() { ; CHECK-LABEL: stepvector_v2i32: ; CHECK: # %bb.0: @@ -122,8 +102,6 @@ define <2 x i32> @stepvector_v2i32() { ret <2 x i32> %v } -declare <4 x i32> @llvm.stepvector.v4i32() - define <4 x i32> @stepvector_v4i32() { ; CHECK-LABEL: stepvector_v4i32: ; CHECK: # %bb.0: @@ -134,8 +112,6 @@ define <4 x i32> @stepvector_v4i32() { ret <4 x i32> %v } -declare <8 x i32> @llvm.stepvector.v8i32() - define <8 x i32> @stepvector_v8i32() { ; CHECK-LABEL: stepvector_v8i32: ; CHECK: # %bb.0: @@ -146,8 +122,6 @@ define <8 x i32> @stepvector_v8i32() { ret <8 x i32> %v } -declare <16 x i32> @llvm.stepvector.v16i32() - define <16 x i32> @stepvector_v16i32() { ; CHECK-LABEL: stepvector_v16i32: ; CHECK: # %bb.0: @@ -158,8 +132,6 @@ define <16 x i32> @stepvector_v16i32() { ret <16 x i32> %v } -declare <2 x i64> @llvm.stepvector.v2i64() - define <2 x i64> @stepvector_v2i64() { 
; CHECK-LABEL: stepvector_v2i64: ; CHECK: # %bb.0: @@ -170,8 +142,6 @@ define <2 x i64> @stepvector_v2i64() { ret <2 x i64> %v } -declare <4 x i64> @llvm.stepvector.v4i64() - define <4 x i64> @stepvector_v4i64() { ; CHECK-LABEL: stepvector_v4i64: ; CHECK: # %bb.0: @@ -182,8 +152,6 @@ define <4 x i64> @stepvector_v4i64() { ret <4 x i64> %v } -declare <8 x i64> @llvm.stepvector.v8i64() - define <8 x i64> @stepvector_v8i64() { ; CHECK-LABEL: stepvector_v8i64: ; CHECK: # %bb.0: @@ -194,8 +162,6 @@ define <8 x i64> @stepvector_v8i64() { ret <8 x i64> %v } -declare <16 x i64> @llvm.stepvector.v16i64() - define <16 x i64> @stepvector_v16i64() { ; CHECK-LABEL: stepvector_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll index 056f55260b854..6cbf32151e748 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll @@ -623,11 +623,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i8>) -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>) -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32 immarg, <32 x i1>) -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32 immarg, <8 x i1>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: gather_of_pointers: @@ -757,8 +752,6 @@ bb18: ; preds = %bb2 ret void } -declare <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x ptr>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. 
define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: scatter_of_pointers: @@ -888,8 +881,6 @@ bb18: ; preds = %bb2 ret void } -declare void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr>, <2 x ptr>, i32 immarg, <2 x i1>) - define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 signext %arg2) { ; CHECK-LABEL: strided_load_startval_add_with_splat: ; CHECK: # %bb.0: # %bb @@ -1010,9 +1001,6 @@ bb35: ; preds = %bb35, %bb32 br i1 %i45, label %bb34, label %bb35 } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32 immarg, <16 x i1>) - define void @gather_no_scalar_remainder(ptr noalias nocapture noundef %arg, ptr noalias nocapture noundef readonly %arg1, i64 noundef %arg2) { ; CHECK-LABEL: gather_no_scalar_remainder: ; CHECK: # %bb.0: # %bb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll index 8f7d738fe6d91..ef09a3fb6d5fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll @@ -140,4 +140,3 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i8>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll index 62b65ddd3d19a..108c75c8c4abc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll @@ -622,11 +622,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 
x i8>) -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>) -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32 immarg, <32 x i1>) -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32 immarg, <8 x i1>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: @gather_of_pointers( @@ -702,8 +697,6 @@ bb18: ; preds = %bb2 ret void } -declare <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x ptr>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: @scatter_of_pointers( @@ -779,8 +772,6 @@ bb18: ; preds = %bb2 ret void } -declare void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr>, <2 x ptr>, i32 immarg, <2 x i1>) - define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 signext %arg2) { ; CHECK-LABEL: @strided_load_startval_add_with_splat( ; CHECK-NEXT: bb: @@ -896,9 +887,6 @@ bb35: ; preds = %bb35, %bb32 br i1 %i45, label %bb34, label %bb35 } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32 immarg, <16 x i1>) - define void @gather_no_scalar_remainder(ptr noalias nocapture noundef %arg, ptr noalias nocapture noundef readonly %arg1, i64 noundef %arg2) { ; CHECK-LABEL: @gather_no_scalar_remainder( ; CHECK-NEXT: bb: @@ -964,8 +952,6 @@ entry: ret <8 x i8> %3 } -declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i8>) - define void @gather_narrow_idx(ptr noalias nocapture %A, ptr noalias nocapture readonly %B) { ; CHECK-LABEL: @gather_narrow_idx( ; CHECK-NEXT: entry: @@ -1101,7 +1087,6 @@ 
vector.body: ; preds = %vector.body, %entry %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5) %i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2 - %elems = sub i64 1024, %index %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %elems, i32 32, i1 false) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll index 4b7f82f94f5e4..8af4ced77be39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,CHECK-NO-OPT,CHECK-NO-OPT-ZVFHMIN -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i8(ptr, i8, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8_i8: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> % ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i16(ptr, i16, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i16(ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8_i16: ; CHECK: # %bb.0: @@ -48,8 +44,6 @@ define <2 x i8> @strided_vpload_v2i8_i16(ptr %ptr, i16 signext %stride, <2 x i1> ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr, i64, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i64(ptr %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8_i64: ; CHECK-RV32: # %bb.0: @@ -66,8 +60,6 @@ define <2 x i8> @strided_vpload_v2i8_i64(ptr %ptr, i64 signext %stride, <2 x i1> ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 
signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8: ; CHECK: # %bb.0: @@ -78,8 +70,6 @@ define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 signext %stride, <2 x i1> %m, ret <2 x i8> %load } -declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i8> @strided_vpload_v4i8(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i8: ; CHECK: # %bb.0: @@ -100,8 +90,6 @@ define <4 x i8> @strided_vpload_v4i8_allones_mask(ptr %ptr, i32 signext %stride, ret <4 x i8> %load } -declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i8: ; CHECK: # %bb.0: @@ -122,8 +110,6 @@ define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zero ret <8 x i8> %load } -declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i16: ; CHECK: # %bb.0: @@ -134,8 +120,6 @@ define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> % ret <2 x i16> %load } -declare <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i16: ; CHECK: # %bb.0: @@ -146,8 +130,6 @@ define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> % ret <4 x i16> %load } -declare <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i16: ; CHECK: # %bb.0: @@ -178,8 +160,6 @@ define <8 x i16> 
@strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %strid ret <8 x i16> %load } -declare <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i32: ; CHECK: # %bb.0: @@ -190,8 +170,6 @@ define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> % ret <2 x i32> %load } -declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i32: ; CHECK: # %bb.0: @@ -212,8 +190,6 @@ define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 ze ret <4 x i32> %load } -declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i32: ; CHECK: # %bb.0: @@ -234,8 +210,6 @@ define <8 x i32> @strided_vpload_v8i32_allones_mask(ptr %ptr, i32 signext %strid ret <8 x i32> %load } -declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i64: ; CHECK: # %bb.0: @@ -256,8 +230,6 @@ define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 ze ret <2 x i64> %load } -declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i64: ; CHECK: # %bb.0: @@ -278,8 +250,6 @@ define <4 x i64> @strided_vpload_v4i64_allones_mask(ptr %ptr, i32 signext %strid ret <4 x i64> %load } -declare <8 x i64> 
@llvm.experimental.vp.strided.load.v8i64.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i64: ; CHECK: # %bb.0: @@ -290,8 +260,6 @@ define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> % ret <8 x i64> %load } -declare <2 x bfloat> @llvm.experimental.vp.strided.load.v2bf16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x bfloat> @strided_vpload_v2bf16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2bf16: ; CHECK: # %bb.0: @@ -312,8 +280,6 @@ define <2 x bfloat> @strided_vpload_v2bf16_allones_mask(ptr %ptr, i32 signext %s ret <2 x bfloat> %load } -declare <4 x bfloat> @llvm.experimental.vp.strided.load.v4bf16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x bfloat> @strided_vpload_v4bf16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4bf16: ; CHECK: # %bb.0: @@ -324,8 +290,6 @@ define <4 x bfloat> @strided_vpload_v4bf16(ptr %ptr, i32 signext %stride, <4 x i ret <4 x bfloat> %load } -declare <8 x bfloat> @llvm.experimental.vp.strided.load.v8bf16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x bfloat> @strided_vpload_v8bf16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8bf16: ; CHECK: # %bb.0: @@ -346,8 +310,6 @@ define <8 x bfloat> @strided_vpload_v8bf16_unit_stride(ptr %ptr, <8 x i1> %m, i3 ret <8 x bfloat> %load } -declare <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x half> @strided_vpload_v2f16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2f16: ; CHECK: # %bb.0: @@ -368,8 +330,6 @@ define <2 x half> @strided_vpload_v2f16_allones_mask(ptr %ptr, i32 signext %stri ret <2 x half> %load } -declare <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 
x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f16: ; CHECK: # %bb.0: @@ -380,8 +340,6 @@ define <4 x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1> ret <4 x half> %load } -declare <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f16: ; CHECK: # %bb.0: @@ -402,8 +360,6 @@ define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 z ret <8 x half> %load } -declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2f32: ; CHECK: # %bb.0: @@ -414,8 +370,6 @@ define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> ret <2 x float> %load } -declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f32: ; CHECK: # %bb.0: @@ -436,8 +390,6 @@ define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 ret <4 x float> %load } -declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f32: ; CHECK: # %bb.0: @@ -458,8 +410,6 @@ define <8 x float> @strided_vpload_v8f32_allones_mask(ptr %ptr, i32 signext %str ret <8 x float> %load } -declare <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: strided_vpload_v2f64: ; CHECK: # %bb.0: @@ -480,9 +430,6 @@ define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 ret <2 x double> %load } - -declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f64: ; CHECK: # %bb.0: @@ -503,8 +450,6 @@ define <4 x double> @strided_vpload_v4f64_allones_mask(ptr %ptr, i32 signext %st ret <4 x double> %load } -declare <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x double> @strided_vpload_v8f64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f64: ; CHECK: # %bb.0: @@ -536,8 +481,6 @@ define <3 x double> @strided_vpload_v3f64_allones_mask(ptr %ptr, i32 signext %st ret <3 x double> %v } -declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <3 x i1>, i32) - ; Splitting define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind { ; CHECK-LABEL: strided_vpload_v32f64: @@ -593,8 +536,6 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext % ret <32 x double> %load } -declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32, <32 x i1>, i32) - ; Widening + splitting (with HiIsEmpty == true) define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_v33f64: @@ -702,8 +643,6 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask ret <33 x double> %v } -declare <33 x double> @llvm.experimental.vp.strided.load.v33f64.p0.i64(ptr, i64, <33 x i1>, i32) - ; Test unmasked integer zero strided define <4 x i8> @zero_strided_unmasked_vpload_4i8_i8(ptr %ptr) { ; CHECK-OPT-LABEL: 
zero_strided_unmasked_vpload_4i8_i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll index 7ca329835b7ac..25624ea0fcf6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64 -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i8(<2 x i8>, ptr, i8, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8_i8: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i16(<2 x i8>, ptr, i16, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i16(<2 x i8> %val, ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8_i16: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define void @strided_vpstore_v2i8_i16(<2 x i8> %val, ptr %ptr, i16 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i64(<2 x i8>, ptr, i64, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i64(<2 x i8> %val, ptr %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_i64: ; CHECK-RV32: # %bb.0: @@ -54,8 +48,6 @@ define void @strided_vpstore_v2i8_i64(<2 x i8> %val, ptr %ptr, i64 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride, ret void } 
-declare void @llvm.experimental.vp.strided.store.v4i8.p0.i32(<4 x i8>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride, ret void } -declare void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i8: ; CHECK: # %bb.0: @@ -100,8 +88,6 @@ define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> ret void } -declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i16: ; CHECK: # %bb.0: @@ -112,8 +98,6 @@ define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v4i16.p0.i32(<4 x i16>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i16: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i16: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, 
i32) - define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i32: ; CHECK: # %bb.0: @@ -158,8 +138,6 @@ define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i32: ; CHECK: # %bb.0: @@ -180,8 +158,6 @@ define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i32: ; CHECK: # %bb.0: @@ -192,8 +168,6 @@ define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i64: ; CHECK: # %bb.0: @@ -214,8 +188,6 @@ define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i64: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v8i64.p0.i32(<8 x i64>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext 
%stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i64: ; CHECK: # %bb.0: @@ -238,8 +208,6 @@ define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2bf16.p0.i32(<2 x bfloat>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2bf16(<2 x bfloat> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2bf16: ; CHECK: # %bb.0: @@ -250,8 +218,6 @@ define void @strided_vpstore_v2bf16(<2 x bfloat> %val, ptr %ptr, i32 signext %st ret void } -declare void @llvm.experimental.vp.strided.store.v4bf16.p0.i32(<4 x bfloat>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4bf16(<4 x bfloat> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4bf16: ; CHECK: # %bb.0: @@ -262,8 +228,6 @@ define void @strided_vpstore_v4bf16(<4 x bfloat> %val, ptr %ptr, i32 signext %st ret void } -declare void @llvm.experimental.vp.strided.store.v8bf16.p0.i32(<8 x bfloat>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8bf16(<8 x bfloat> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8bf16: ; CHECK: # %bb.0: @@ -284,8 +248,6 @@ define void @strided_vpstore_v8bf16_unit_stride(<8 x bfloat> %val, ptr %ptr, <8 ret void } -declare void @llvm.experimental.vp.strided.store.v2f16.p0.i32(<2 x half>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f16: ; CHECK: # %bb.0: @@ -296,8 +258,6 @@ define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %strid ret void } -declare void @llvm.experimental.vp.strided.store.v4f16.p0.i32(<4 x half>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: strided_vpstore_v4f16: ; CHECK: # %bb.0: @@ -308,8 +268,6 @@ define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %strid ret void } -declare void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f16: ; CHECK: # %bb.0: @@ -330,8 +288,6 @@ define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i ret void } -declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f32: ; CHECK: # %bb.0: @@ -342,8 +298,6 @@ define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4f32: ; CHECK: # %bb.0: @@ -364,8 +318,6 @@ define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x ret void } -declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f32: ; CHECK: # %bb.0: @@ -376,8 +328,6 @@ define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f64: ; CHECK: # %bb.0: @@ 
-398,8 +348,6 @@ define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x ret void } -declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4f64: ; CHECK: # %bb.0: @@ -410,8 +358,6 @@ define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %str ret void } -declare void @llvm.experimental.vp.strided.store.v8f64.p0.i32(<8 x double>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f64(<8 x double> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f64: ; CHECK: # %bb.0: @@ -453,8 +399,6 @@ define void @strided_vpstore_v3f32_allones_mask(<3 x float> %v, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float>, ptr , i32, <3 x i1>, i32) - ; Splitting define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %stride, <32 x i1> %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_v32f64: @@ -506,4 +450,3 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double>, ptr, i32, <32 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll index 3b1dc298c12ce..e0b3d04332067 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x 
i16>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) - -declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) - define void @trunc_sat_i8i16_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i8i16_maxmin: ; CHECK: # %bb.0: @@ -134,7 +123,6 @@ define void @trunc_sat_u8u16_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i16i32_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i16i32_notopt: ; CHECK: # %bb.0: @@ -261,7 +249,6 @@ define void @trunc_sat_u16u32_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i32i64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i32i64_notopt: ; CHECK: # %bb.0: @@ -317,7 +304,6 @@ define void @trunc_sat_i32i64_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_notopt: ; CHECK: # %bb.0: @@ -352,7 +338,6 @@ define void @trunc_sat_u32u64_min(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_maxmin: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll index b6ef97603d61c..05ac11d1ca9d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i16: ; 
CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <2 x i1> @vtrunc_v2i1_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <2 x i1> @vtrunc_v2i1_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll index 461b4d0e02cb8..f992d1f8f7eee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i7_v2i16: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ret <2 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i15: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: 
vtrunc_v2i8_v2i16: ; CHECK: # %bb.0: @@ -48,8 +42,6 @@ define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v128i7_v128i16: ; CHECK: # %bb.0: @@ -79,8 +71,6 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero ret <128 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i32: ; CHECK: # %bb.0: @@ -105,8 +95,6 @@ define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i64: ; CHECK: # %bb.0: @@ -135,8 +123,6 @@ define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i16_v2i32: ; CHECK: # %bb.0: @@ -157,8 +143,6 @@ define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i16> %v } -declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i16_v2i64: ; CHECK: # %bb.0: @@ -183,8 +167,6 @@ define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i16> %v } -declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v15i16_v15i64: ; 
CHECK: # %bb.0: @@ -197,8 +179,6 @@ define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext ret <15 x i16> %v } -declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i32_v2i64: ; CHECK: # %bb.0: @@ -219,8 +199,6 @@ define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i32> %v } -declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32) - define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) { ; RV32-LABEL: vtrunc_v128i32_v128i64: ; RV32: # %bb.0: @@ -845,8 +823,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze ret <128 x i32> %v } -declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v32i32_v32i64: ; CHECK: # %bb.0: @@ -876,8 +852,6 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext ret <32 x i32> %v } -declare <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i7> @vtrunc_v2i7_v2i8(<2 x i8> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i7_v2i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll index e625c46a57145..8aaf1e7fa2330 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh < %s | FileCheck %s -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vuitofp_v4f16_v4i1: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <4 x half> @vuitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i1: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x float> @vuitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll index b72e3cfcb920a..3d1febe95421f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i7: ; ZVFH: # %bb.0: @@ -35,8 +33,6 @@ define <4 x half> @vuitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i8: ; ZVFH: # %bb.0: @@ -77,8 +73,6 @@ define <4 x half> @vuitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i16: ; ZVFH: # %bb.0: @@ 
-115,8 +109,6 @@ define <4 x half> @vuitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i32: ; ZVFH: # %bb.0: @@ -155,8 +147,6 @@ define <4 x half> @vuitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i64: ; ZVFH: # %bb.0: @@ -197,8 +187,6 @@ define <4 x half> @vuitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i8: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define <4 x float> @vuitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i16: ; CHECK: # %bb.0: @@ -245,8 +231,6 @@ define <4 x float> @vuitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i32: ; CHECK: # %bb.0: @@ -267,8 +251,6 @@ define <4 x float> @vuitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vuitofp_v4f32_v4i64: ; CHECK: # %bb.0: @@ -291,8 +273,6 @@ define <4 x float> @vuitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i8: ; CHECK: # %bb.0: @@ -315,8 +295,6 @@ define <4 x double> @vuitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i16: ; CHECK: # %bb.0: @@ -339,8 +317,6 @@ define <4 x double> @vuitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i32: ; CHECK: # %bb.0: @@ -363,8 +339,6 @@ define <4 x double> @vuitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i64: ; CHECK: # %bb.0: @@ -385,8 +359,6 @@ define <4 x double> @vuitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev ret <4 x double> %v } -declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v32f64_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll index fa39b06b4d779..8f4744b92b1ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -72,8 +72,6 @@ define void @store_v4i32_align2(<4 x i32> %x, ptr %ptr) { ret void } -declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>) - define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-SLOW-LABEL: mgather_v2i16_align1: ; RV32-SLOW: # %bb.0: @@ -160,8 +158,6 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ret <2 x i16> %v } -declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>) - define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32-SLOW-LABEL: mgather_v2i64_align4: ; RV32-SLOW: # %bb.0: @@ -244,8 +240,6 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ret <2 x i64> %v } -declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-SLOW-LABEL: mscatter_v4i16_align1: ; RV32-SLOW: # %bb.0: @@ -392,8 +386,6 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) ret void } -declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32-SLOW-LABEL: mscatter_v2i32_align2: ; RV32-SLOW: # %bb.0: @@ -476,8 +468,6 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) ret void } -declare <2 x i32> @llvm.masked.load.v2i32(ptr, i32, <2 x i1>, <2 x i32>) - define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwind { ; RV32-SLOW-LABEL: masked_load_v2i32_align1: ; RV32-SLOW: # %bb.0: @@ -580,8 +570,6 @@ define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwi ret void } -declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>) - define void 
@masked_store_v2i32_align2(<2 x i32> %val, ptr %a, <2 x i32> %m) nounwind { ; SLOW-LABEL: masked_store_v2i32_align2: ; SLOW: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll index 2f30cf4f88096..391f54adb4c11 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.add.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.add.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.add.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.add.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> @llvm.vp.add.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vadd_vv_v32i1(<32 x 
i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vadd_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.add.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vadd_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll index 22c629088bacd..96dff2464e501 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.add.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i8: ; CHECK: # %bb.0: @@ -82,8 +78,6 @@ define <2 x i8> @vadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.add.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i8: ; CHECK: # %bb.0: @@ -160,8 +154,6 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.add.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vadd_vv_v5i8: ; CHECK: # %bb.0: @@ -226,8 +218,6 @@ define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i8: ; CHECK: # %bb.0: @@ -292,8 +282,6 @@ define <8 x i8> @vadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.add.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i8: ; CHECK: # %bb.0: @@ -358,8 +346,6 @@ define <16 x i8> @vadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: @@ -439,8 +425,6 @@ define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.add.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i16: ; CHECK: # %bb.0: @@ -505,8 +489,6 @@ define <2 x i16> @vadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.add.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i16: ; CHECK: # %bb.0: @@ -571,8 +553,6 @@ define <4 x i16> @vadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.add.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vadd_vv_v8i16: ; CHECK: # %bb.0: @@ -637,8 +617,6 @@ define <8 x i16> @vadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.add.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i16: ; CHECK: # %bb.0: @@ -703,8 +681,6 @@ define <16 x i16> @vadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i32: ; CHECK: # %bb.0: @@ -769,8 +745,6 @@ define <2 x i32> @vadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i32: ; CHECK: # %bb.0: @@ -835,8 +809,6 @@ define <4 x i32> @vadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i32: ; CHECK: # %bb.0: @@ -901,8 +873,6 @@ define <8 x i32> @vadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.add.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i32: ; CHECK: # %bb.0: @@ -967,8 +937,6 @@ define <16 x i32> @vadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.add.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i64: ; CHECK: # %bb.0: @@ -1063,8 +1031,6 @@ define <2 x i64> @vadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.add.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i64: ; CHECK: # %bb.0: @@ -1159,8 +1125,6 @@ define <4 x i64> @vadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i64: ; CHECK: # %bb.0: @@ -1255,8 +1219,6 @@ define <8 x i64> @vadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.add.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i64: ; CHECK: # %bb.0: @@ -1353,8 +1315,6 @@ define <16 x i64> @vadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll index 1be3fd0910338..96eb846538f7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.and.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.and.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i8: ; CHECK: # %bb.0: @@ -106,8 +102,6 @@ define <2 x i8> @vand_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.and.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vand_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i8: ; CHECK: # %bb.0: @@ -172,8 +166,6 @@ define <4 x i8> @vand_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.and.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vand_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i8: ; CHECK: # %bb.0: @@ -238,8 +230,6 @@ define <8 x i8> @vand_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.and.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - 
define <16 x i8> @vand_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i8: ; CHECK: # %bb.0: @@ -304,8 +294,6 @@ define <16 x i8> @vand_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.and.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vand_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i16: ; CHECK: # %bb.0: @@ -370,8 +358,6 @@ define <2 x i16> @vand_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.and.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vand_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i16: ; CHECK: # %bb.0: @@ -436,8 +422,6 @@ define <4 x i16> @vand_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.and.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vand_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i16: ; CHECK: # %bb.0: @@ -502,8 +486,6 @@ define <8 x i16> @vand_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.and.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vand_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i16: ; CHECK: # %bb.0: @@ -568,8 +550,6 @@ define <16 x i16> @vand_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.and.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vand_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i32: ; CHECK: # %bb.0: @@ -634,8 +614,6 @@ define <2 x i32> @vand_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x 
i32>, <4 x i1>, i32) - define <4 x i32> @vand_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i32: ; CHECK: # %bb.0: @@ -700,8 +678,6 @@ define <4 x i32> @vand_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.and.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vand_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i32: ; CHECK: # %bb.0: @@ -766,8 +742,6 @@ define <8 x i32> @vand_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.and.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vand_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i32: ; CHECK: # %bb.0: @@ -832,8 +806,6 @@ define <16 x i32> @vand_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.and.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vand_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i64: ; CHECK: # %bb.0: @@ -928,8 +900,6 @@ define <2 x i64> @vand_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.and.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vand_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i64: ; CHECK: # %bb.0: @@ -1024,8 +994,6 @@ define <4 x i64> @vand_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.and.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vand_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i64: ; CHECK: # %bb.0: @@ -1120,8 +1088,6 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <11 x i64> 
@llvm.vp.and.v11i64(<11 x i64>, <11 x i64>, <11 x i1>, i32) - define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v11i64: ; CHECK: # %bb.0: @@ -1216,8 +1182,6 @@ define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) { ret <11 x i64> %v } -declare <16 x i64> @llvm.vp.and.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll index 2455d872ae7f0..da26c63b61e34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -168,8 +168,6 @@ define <16 x bfloat> @vfsgnj_vv_v16bf16_unmasked(<16 x bfloat> %va, <16 x bfloat ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.copysign.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f16: ; CHECK: # %bb.0: @@ -190,8 +188,6 @@ define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 ret <2 x half> %v } -declare <4 x half> @llvm.vp.copysign.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f16: ; CHECK: # %bb.0: @@ -212,8 +208,6 @@ define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 ret <4 x half> %v } -declare <8 x half> @llvm.vp.copysign.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f16: ; CHECK: # %bb.0: @@ -234,8 +228,6 @@ define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x 
half> %va, <8 x half> %vb, i32 ret <8 x half> %v } -declare <16 x half> @llvm.vp.copysign.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f16: ; CHECK: # %bb.0: @@ -256,8 +248,6 @@ define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, ret <16 x half> %v } -declare <2 x float> @llvm.vp.copysign.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f32: ; CHECK: # %bb.0: @@ -278,8 +268,6 @@ define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i ret <2 x float> %v } -declare <4 x float> @llvm.vp.copysign.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f32: ; CHECK: # %bb.0: @@ -300,8 +288,6 @@ define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i ret <4 x float> %v } -declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f32: ; CHECK: # %bb.0: @@ -322,8 +308,6 @@ define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i ret <8 x float> %v } -declare <16 x float> @llvm.vp.copysign.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f32: ; CHECK: # %bb.0: @@ -344,8 +328,6 @@ define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %v ret <16 x float> %v } -declare <2 x double> @llvm.vp.copysign.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> 
@vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f64: ; CHECK: # %bb.0: @@ -366,8 +348,6 @@ define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb ret <2 x double> %v } -declare <4 x double> @llvm.vp.copysign.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f64: ; CHECK: # %bb.0: @@ -388,8 +368,6 @@ define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb ret <4 x double> %v } -declare <8 x double> @llvm.vp.copysign.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f64: ; CHECK: # %bb.0: @@ -410,8 +388,6 @@ define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb ret <8 x double> %v } -declare <15 x double> @llvm.vp.copysign.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v15f64: ; CHECK: # %bb.0: @@ -432,8 +408,6 @@ define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.copysign.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f64: ; CHECK: # %bb.0: @@ -454,8 +428,6 @@ define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.copysign.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfsgnj_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll index b2279dca45d8d..6bea222ffb90e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vdiv_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vdiv_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vdiv_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i8: ; CHECK: # %bb.0: @@ -112,8 +106,6 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v6i8: ; CHECK: # %bb.0: @@ -124,8 +116,6 @@ define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroex ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i8: ; CHECK: # %bb.0: @@ -170,8 +160,6 @@ 
define <8 x i8> @vdiv_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vdiv_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i8: ; CHECK: # %bb.0: @@ -216,8 +204,6 @@ define <16 x i8> @vdiv_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vdiv_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i16: ; CHECK: # %bb.0: @@ -262,8 +248,6 @@ define <2 x i16> @vdiv_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vdiv_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i16: ; CHECK: # %bb.0: @@ -308,8 +292,6 @@ define <4 x i16> @vdiv_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vdiv_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i16: ; CHECK: # %bb.0: @@ -354,8 +336,6 @@ define <8 x i16> @vdiv_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vdiv_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i16: ; CHECK: # %bb.0: @@ -400,8 +380,6 @@ define <16 x i16> @vdiv_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vdiv_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vv_v2i32: ; CHECK: # %bb.0: @@ -446,8 +424,6 @@ define <2 x i32> @vdiv_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vdiv_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i32: ; CHECK: # %bb.0: @@ -492,8 +468,6 @@ define <4 x i32> @vdiv_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vdiv_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i32: ; CHECK: # %bb.0: @@ -538,8 +512,6 @@ define <8 x i32> @vdiv_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vdiv_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i32: ; CHECK: # %bb.0: @@ -584,8 +556,6 @@ define <16 x i32> @vdiv_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vdiv_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i64: ; CHECK: # %bb.0: @@ -660,8 +630,6 @@ define <2 x i64> @vdiv_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vdiv_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i64: ; CHECK: # %bb.0: @@ -736,8 +704,6 @@ define <4 x i64> @vdiv_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vdiv_vv_v8i64(<8 x i64> %va, <8 x i64> 
%b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i64: ; CHECK: # %bb.0: @@ -812,8 +778,6 @@ define <8 x i64> @vdiv_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vdiv_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i64: ; CHECK: # %bb.0: @@ -888,9 +852,6 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.sdiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vdiv_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -911,8 +872,6 @@ define <3 x i8> @vdiv_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.sdiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vdiv_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll index f1155a0657b40..1a7874b2c8c6f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vdivu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vdivu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vdivu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i8: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v6i8: ; CHECK: # %bb.0: @@ -123,8 +115,6 @@ define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroe ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i8: ; CHECK: # %bb.0: @@ -169,8 +159,6 @@ define <8 x i8> @vdivu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vdivu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i8: ; CHECK: # %bb.0: @@ -215,8 +203,6 @@ define <16 x i8> @vdivu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vdivu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i16: ; CHECK: # %bb.0: @@ -261,8 +247,6 @@ define <2 x i16> @vdivu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vdivu_vv_v4i16(<4 x i16> %va, <4 x i16> 
%b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i16: ; CHECK: # %bb.0: @@ -307,8 +291,6 @@ define <4 x i16> @vdivu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vdivu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i16: ; CHECK: # %bb.0: @@ -353,8 +335,6 @@ define <8 x i16> @vdivu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vdivu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i16: ; CHECK: # %bb.0: @@ -399,8 +379,6 @@ define <16 x i16> @vdivu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vdivu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i32: ; CHECK: # %bb.0: @@ -445,8 +423,6 @@ define <2 x i32> @vdivu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vdivu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i32: ; CHECK: # %bb.0: @@ -491,8 +467,6 @@ define <4 x i32> @vdivu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vdivu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i32: ; CHECK: # %bb.0: @@ -537,8 +511,6 @@ define <8 x i32> @vdivu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32>, <16 x i32>, <16 x 
i1>, i32) - define <16 x i32> @vdivu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i32: ; CHECK: # %bb.0: @@ -583,8 +555,6 @@ define <16 x i32> @vdivu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vdivu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i64: ; CHECK: # %bb.0: @@ -659,8 +629,6 @@ define <2 x i64> @vdivu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vdivu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i64: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define <4 x i64> @vdivu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vdivu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i64: ; CHECK: # %bb.0: @@ -811,8 +777,6 @@ define <8 x i64> @vdivu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vdivu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i64: ; CHECK: # %bb.0: @@ -887,7 +851,6 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ret <16 x i64> %v } - define <8 x i8> @vdivu_vv_v8i8_unmasked_avl3(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: vdivu_vv_v8i8_unmasked_avl3: ; CHECK: # %bb.0: @@ -908,8 +871,6 @@ define <8 x i8> @vdivu_vv_v8i8_unmasked_avl7(<8 x i8> %va, <8 x i8> %b) { ret <8 x i8> %v } -declare <3 x i8> @llvm.vp.udiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x 
i8> @vdivu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -930,8 +891,6 @@ define <3 x i8> @vdivu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.udiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vdivu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll index 01bd706ed31f8..2774aba974a29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll @@ -220,8 +220,6 @@ define <16 x bfloat> @vfabs_vv_v16bf16_unmasked(<16 x bfloat> %va, i32 zeroext % ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.fabs.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v2f16: ; ZVFH: # %bb.0: @@ -274,8 +272,6 @@ define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.fabs.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v4f16: ; ZVFH: # %bb.0: @@ -328,8 +324,6 @@ define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.fabs.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v8f16: ; ZVFH: # %bb.0: @@ -382,8 +376,6 @@ define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.fabs.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfabs_vv_v16f16: ; ZVFH: # %bb.0: @@ -436,8 +428,6 @@ define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.fabs.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfabs_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f32: ; CHECK: # %bb.0: @@ -458,8 +448,6 @@ define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.fabs.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfabs_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f32: ; CHECK: # %bb.0: @@ -480,8 +468,6 @@ define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.fabs.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfabs_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f32: ; CHECK: # %bb.0: @@ -502,8 +488,6 @@ define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.fabs.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vfabs_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f32: ; CHECK: # %bb.0: @@ -524,8 +508,6 @@ define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.fabs.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfabs_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f64: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.fabs.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfabs_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f64: ; 
CHECK: # %bb.0: @@ -568,8 +548,6 @@ define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.fabs.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfabs_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f64: ; CHECK: # %bb.0: @@ -590,8 +568,6 @@ define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.fabs.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfabs_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v15f64: ; CHECK: # %bb.0: @@ -612,8 +588,6 @@ define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.fabs.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfabs_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f64: ; CHECK: # %bb.0: @@ -634,8 +608,6 @@ define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll index 599f505808ab4..003feb6d748f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfadd_vv_v2f16(<2 x 
half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -76,7 +73,6 @@ define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -100,7 +96,6 @@ define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -126,7 +121,6 @@ define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -150,7 +144,6 @@ define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } 
-declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -174,7 +167,6 @@ define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -198,7 +190,6 @@ define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +213,6 @@ define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -246,7 +236,6 @@ define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +259,6 @@ define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfadd_vv_v8f64(<8 x 
double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll index 8a8fe234cacd1..2a65f57bcb94c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fadd.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fadd.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fadd.v16f16(<16 x half>, <16 
x half>, <16 x i1>, i32) - define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fadd.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f32: ; CHECK: # %bb.0: @@ -485,8 +473,6 @@ define <2 x float> @vfadd_vf_v2f32_unmasked_commute(<2 x float> %va, float %b, i ret <2 x float> %v } -declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f32: ; CHECK: # %bb.0: @@ -531,8 +517,6 @@ define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f32: ; CHECK: # %bb.0: @@ -577,8 +561,6 @@ define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fadd.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f32: ; CHECK: # %bb.0: @@ -623,8 +605,6 @@ define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fadd.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f64: ; CHECK: # %bb.0: @@ -669,8 +649,6 @@ 
define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fadd.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f64: ; CHECK: # %bb.0: @@ -715,8 +693,6 @@ define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fadd.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f64: ; CHECK: # %bb.0: @@ -761,8 +737,6 @@ define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fadd.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll index 690c8af7fc8e7..9eb92f45bb149 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll @@ -286,12 +286,3 @@ define <16 x i1> @isnotfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl ret <16 x i1> %1 } -declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half>, i32, <2 x i1>, i32) -declare <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float>, i32, <2 x i1>, i32) -declare <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float>, i32, <4 x i1>, i32) -declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x i1>, i32) -declare <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float>, i32, <16 x i1>, i32) -declare <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double>, i32, <2 x i1>, i32) -declare <4 x i1> 
@llvm.vp.is.fpclass.v4f64(<4 x double>, i32, <4 x i1>, i32) -declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32) -declare <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double>, i32, <16 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll index 85e8638301ded..4ae62901a627a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll @@ -30,7 +30,6 @@ define <2 x i1> @isnan_v2f32(<2 x float> %x) { ret <2 x i1> %1 } - define <4 x i1> @isnan_v4f32(<4 x float> %x) { ; CHECK-LABEL: isnan_v4f32: ; CHECK: # %bb.0: @@ -155,12 +154,3 @@ define <16 x i1> @isnotfinite_v16f64(<16 x double> %x) { ret <16 x i1> %1 } -declare <2 x i1> @llvm.is.fpclass.v2f16(<2 x half>, i32) -declare <2 x i1> @llvm.is.fpclass.v2f32(<2 x float>, i32) -declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32) -declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32) -declare <16 x i1> @llvm.is.fpclass.v16f32(<16 x float>, i32) -declare <2 x i1> @llvm.is.fpclass.v2f64(<2 x double>, i32) -declare <4 x i1> @llvm.is.fpclass.v4f64(<4 x double>, i32) -declare <8 x i1> @llvm.is.fpclass.v8f64(<8 x double>, i32) -declare <16 x i1> @llvm.is.fpclass.v16f64(<16 x double>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll index dfd509062ccf7..dbc714f50946d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f16(<1 x half> %va, <1 x half> %vb) nounwind strictfp { ; 
CHECK-LABEL: fcmp_oeq_vv_v1f16: ; CHECK: # %bb.0: @@ -637,7 +636,6 @@ define <1 x i1> @fcmp_uno_fv_v1f16(<1 x half> %va, half %b) nounwind strictfp { ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f16(<2 x half> %va, <2 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f16: ; CHECK: # %bb.0: @@ -1270,7 +1268,6 @@ define <2 x i1> @fcmp_uno_fv_v2f16(<2 x half> %va, half %b) nounwind strictfp { ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f16(<4 x half> %va, <4 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f16: ; CHECK: # %bb.0: @@ -1903,7 +1900,6 @@ define <4 x i1> @fcmp_uno_fv_v4f16(<4 x half> %va, half %b) nounwind strictfp { ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f16: ; CHECK: # %bb.0: @@ -2536,7 +2532,6 @@ define <8 x i1> @fcmp_uno_fv_v8f16(<8 x half> %va, half %b) nounwind strictfp { ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x i1> @fcmp_oeq_vv_v16f16(<16 x half> %va, <16 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v16f16: ; CHECK: # %bb.0: @@ -3211,7 +3206,6 @@ define <16 x i1> @fcmp_uno_fv_v16f16(<16 x half> %va, half %b) nounwind strictfp ret <16 x i1> %1 } -declare <32 x i1> @llvm.experimental.constrained.fcmp.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x i1> @fcmp_oeq_vv_v32f16(<32 x half> %va, <32 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v32f16: ; CHECK: # %bb.0: @@ -3928,7 +3922,6 @@ define <32 x i1> @fcmp_uno_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp ret <32 x i1> 
%1 } -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f32(<1 x float> %va, <1 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v1f32: ; CHECK: # %bb.0: @@ -4561,7 +4554,6 @@ define <1 x i1> @fcmp_uno_fv_v1f32(<1 x float> %va, float %b) nounwind strictfp ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f32(<2 x float> %va, <2 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f32: ; CHECK: # %bb.0: @@ -5194,7 +5186,6 @@ define <2 x i1> @fcmp_uno_fv_v2f32(<2 x float> %va, float %b) nounwind strictfp ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f32(<4 x float> %va, <4 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f32: ; CHECK: # %bb.0: @@ -5827,7 +5818,6 @@ define <4 x i1> @fcmp_uno_fv_v4f32(<4 x float> %va, float %b) nounwind strictfp ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f32(<8 x float> %va, <8 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f32: ; CHECK: # %bb.0: @@ -6502,7 +6492,6 @@ define <8 x i1> @fcmp_uno_fv_v8f32(<8 x float> %va, float %b) nounwind strictfp ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x i1> @fcmp_oeq_vv_v16f32(<16 x float> %va, <16 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v16f32: ; CHECK: # %bb.0: @@ -7177,7 +7166,6 @@ define <16 x i1> @fcmp_uno_fv_v16f32(<16 x float> %va, float %b) nounwind strict ret <16 x i1> %1 } -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f64(<1 x 
double> %va, <1 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v1f64: ; CHECK: # %bb.0: @@ -7810,7 +7798,6 @@ define <1 x i1> @fcmp_uno_fv_v1f64(<1 x double> %va, double %b) nounwind strictf ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f64(<2 x double> %va, <2 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f64: ; CHECK: # %bb.0: @@ -8443,7 +8430,6 @@ define <2 x i1> @fcmp_uno_fv_v2f64(<2 x double> %va, double %b) nounwind strictf ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f64(<4 x double> %va, <4 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f64: ; CHECK: # %bb.0: @@ -9118,7 +9104,6 @@ define <4 x i1> @fcmp_uno_fv_v4f64(<4 x double> %va, double %b) nounwind strictf ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll index 472f2073667db..218efde7a477e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f16(<1 x half> %va, <1 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f16: ; CHECK: # %bb.0: @@ -536,7 +535,6 @@ define <1 x i1> 
@fcmps_uno_fv_v1f16(<1 x half> %va, half %b) nounwind strictfp { ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f16(<2 x half> %va, <2 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f16: ; CHECK: # %bb.0: @@ -1068,7 +1066,6 @@ define <2 x i1> @fcmps_uno_fv_v2f16(<2 x half> %va, half %b) nounwind strictfp { ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f16(<4 x half> %va, <4 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f16: ; CHECK: # %bb.0: @@ -1600,7 +1597,6 @@ define <4 x i1> @fcmps_uno_fv_v4f16(<4 x half> %va, half %b) nounwind strictfp { ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f16: ; CHECK: # %bb.0: @@ -2132,7 +2128,6 @@ define <8 x i1> @fcmps_uno_fv_v8f16(<8 x half> %va, half %b) nounwind strictfp { ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmps.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x i1> @fcmps_oeq_vv_v16f16(<16 x half> %va, <16 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v16f16: ; CHECK: # %bb.0: @@ -2664,7 +2659,6 @@ define <16 x i1> @fcmps_uno_fv_v16f16(<16 x half> %va, half %b) nounwind strictf ret <16 x i1> %1 } -declare <32 x i1> @llvm.experimental.constrained.fcmps.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x i1> @fcmps_oeq_vv_v32f16(<32 x half> %va, <32 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v32f16: ; CHECK: # %bb.0: @@ -3238,7 +3232,6 @@ define <32 x i1> @fcmps_uno_fv_v32f16(<32 x half> %va, half %b) nounwind strictf ret <32 x i1> %1 } -declare <1 x i1> 
@llvm.experimental.constrained.fcmps.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f32(<1 x float> %va, <1 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f32: ; CHECK: # %bb.0: @@ -3770,7 +3763,6 @@ define <1 x i1> @fcmps_uno_fv_v1f32(<1 x float> %va, float %b) nounwind strictfp ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f32(<2 x float> %va, <2 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f32: ; CHECK: # %bb.0: @@ -4302,7 +4294,6 @@ define <2 x i1> @fcmps_uno_fv_v2f32(<2 x float> %va, float %b) nounwind strictfp ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f32(<4 x float> %va, <4 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f32: ; CHECK: # %bb.0: @@ -4834,7 +4825,6 @@ define <4 x i1> @fcmps_uno_fv_v4f32(<4 x float> %va, float %b) nounwind strictfp ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f32(<8 x float> %va, <8 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f32: ; CHECK: # %bb.0: @@ -5366,7 +5356,6 @@ define <8 x i1> @fcmps_uno_fv_v8f32(<8 x float> %va, float %b) nounwind strictfp ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x i1> @fcmps_oeq_vv_v16f32(<16 x float> %va, <16 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v16f32: ; CHECK: # %bb.0: @@ -5898,7 +5887,6 @@ define <16 x i1> @fcmps_uno_fv_v16f32(<16 x float> %va, float %b) nounwind stric ret <16 x i1> %1 } -declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f64(<1 x 
double> %va, <1 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f64: ; CHECK: # %bb.0: @@ -6430,7 +6418,6 @@ define <1 x i1> @fcmps_uno_fv_v1f64(<1 x double> %va, double %b) nounwind strict ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f64(<2 x double> %va, <2 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f64: ; CHECK: # %bb.0: @@ -6962,7 +6949,6 @@ define <2 x i1> @fcmps_uno_fv_v2f64(<2 x double> %va, double %b) nounwind strict ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f64(<4 x double> %va, <4 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f64: ; CHECK: # %bb.0: @@ -7494,7 +7480,6 @@ define <4 x i1> @fcmps_uno_fv_v4f64(<4 x double> %va, double %b) nounwind strict ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll index 1bc880d93af1a..f912ed9f0ed2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> 
@vfdiv_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -88,7 +85,6 @@ define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -112,7 +108,6 @@ define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -138,7 +133,6 @@ define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -162,7 +156,6 @@ define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } -declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> 
@vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -186,7 +179,6 @@ define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +214,6 @@ define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +237,6 @@ define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +260,6 @@ define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +283,6 @@ define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll index 30f509436214a..977e236c91fe7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fdiv.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, 
i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f32: ; CHECK: # %bb.0: @@ -599,8 +581,6 @@ define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare 
<4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll index 6f9885d9529d5..f28b970f48ff7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v2f16: ; ZVFH: # %bb.0: @@ -111,8 +109,6 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v ret <2 x half> %v } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vfma_vv_v4f16: ; ZVFH: # %bb.0: @@ -214,8 +210,6 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v ret <4 x half> %v } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v8f16: ; ZVFH: # %bb.0: @@ -317,8 +311,6 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v ret <8 x half> %v } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v16f16: ; ZVFH: # %bb.0: @@ -420,8 +412,6 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half ret <16 x half> %v } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32: ; CHECK: # %bb.0: @@ -467,8 +457,6 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float ret <2 x float> %v } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32: ; CHECK: # %bb.0: @@ -514,8 +502,6 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float ret <4 x float> %v } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32: ; CHECK: # %bb.0: @@ -561,8 +547,6 @@ define <8 x float> 
@vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float ret <8 x float> %v } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32: ; CHECK: # %bb.0: @@ -608,8 +592,6 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f ret <16 x float> %v } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64: ; CHECK: # %bb.0: @@ -655,8 +637,6 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do ret <2 x double> %v } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64: ; CHECK: # %bb.0: @@ -702,8 +682,6 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do ret <4 x double> %v } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f64: ; CHECK: # %bb.0: @@ -749,8 +727,6 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do ret <8 x double> %v } -declare <15 x double> @llvm.vp.fma.v15f64(<15 x double>, <15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: @@ -776,8 +752,6 @@ define <15 x double> 
@vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> % ret <15 x double> %v } -declare <16 x double> @llvm.vp.fma.v16f64(<16 x double>, <16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: @@ -827,8 +801,6 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 ret <16 x double> %v } -declare <32 x double> @llvm.vp.fma.v32f64(<32 x double>, <32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll index bc13e1d217a9b..99bdddcbc3253 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f16: ; CHECK: # %bb.0: @@ -115,11 +110,6 @@ define <2 x half> @vfmacc_vf_v2f16_commute_ta(<2 x half> %va, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 
x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f16: ; CHECK: # %bb.0: @@ -226,11 +216,6 @@ define <4 x half> @vfmacc_vf_v4f16_commute_ta(<4 x half> %va, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f16: ; CHECK: # %bb.0: @@ -337,11 +322,6 @@ define <8 x half> @vfmacc_vf_v8f16_commute_ta(<8 x half> %va, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f16: ; CHECK: # %bb.0: @@ -448,11 +428,6 @@ define <16 x half> @vfmacc_vf_v16f16_commute_ta(<16 x half> %va, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> 
@vfmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v32f16: ; CHECK: # %bb.0: @@ -559,11 +534,6 @@ define <32 x half> @vfmacc_vf_v32f16_commute_ta(<32 x half> %va, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -670,11 +640,6 @@ define <2 x float> @vfmacc_vf_v2f32_commute_ta(<2 x float> %va, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -781,11 +746,6 @@ define <4 x float> @vfmacc_vf_v4f32_commute_ta(<4 x float> %va, float %b, <4 x f ret <4 x float> %u } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f32: ; 
CHECK: # %bb.0: @@ -892,11 +852,6 @@ define <8 x float> @vfmacc_vf_v8f32_commute_ta(<8 x float> %va, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -1003,11 +958,6 @@ define <16 x float> @vfmacc_vf_v16f32_commute_ta(<16 x float> %va, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1114,11 +1064,6 @@ define <2 x double> @vfmacc_vf_v2f64_commute_ta(<2 x double> %va, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1225,11 +1170,6 @@ define <4 x double> 
@vfmacc_vf_v4f64_commute_ta(<4 x double> %va, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll index b8f3f0fef0419..bf7336e58ffc0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfmacc and vfmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f16: ; CHECK: # %bb.0: @@ -31,8 +29,6 @@ define <2 x half> @vfmadd_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) stri ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfmadd_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f16: ; CHECK: # %bb.0: @@ -55,8 +51,6 @@ define <4 x half> @vfmadd_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) stri ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfmadd_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define <8 x half> @vfmadd_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) stri ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfmadd_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v16f16: ; CHECK: # %bb.0: @@ -103,8 +95,6 @@ define <16 x half> @vfmadd_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfmadd_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v32f16: ; CHECK: # %bb.0: @@ -129,8 +119,6 @@ define <32 x half> @vfmadd_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfmadd_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f32: ; CHECK: # %bb.0: @@ -153,8 +141,6 @@ define <2 x float> @vfmadd_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfmadd_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f32: ; CHECK: # %bb.0: @@ -177,8 +163,6 @@ define <4 x float> @vfmadd_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfmadd_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f32: ; CHECK: # %bb.0: @@ -201,8 +185,6 @@ define <8 x float> @vfmadd_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfmadd_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v16f32: ; CHECK: # %bb.0: @@ -225,8 +207,6 @@ define <16 x float> @vfmadd_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfmadd_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f64: ; CHECK: # %bb.0: @@ -249,8 +229,6 @@ define <2 x double> @vfmadd_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> 
@llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfmadd_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f64: ; CHECK: # %bb.0: @@ -273,8 +251,6 @@ define <4 x double> @vfmadd_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfmadd_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll index c736973dd0706..403d0b8d57940 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.maxnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v2f16: ; ZVFH: # %bb.0: @@ -52,8 +50,6 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.maxnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v4f16: ; ZVFH: # %bb.0: @@ -96,8 +92,6 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.maxnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vfmax_vv_v8f16: ; ZVFH: # %bb.0: @@ -140,8 +134,6 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.maxnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v16f16: ; ZVFH: # %bb.0: @@ -184,8 +176,6 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.maxnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32: ; CHECK: # %bb.0: @@ -206,8 +196,6 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.maxnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32: ; CHECK: # %bb.0: @@ -228,8 +216,6 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.maxnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.maxnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32: ; CHECK: # %bb.0: @@ -272,8 +256,6 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> 
@llvm.vp.maxnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64: ; CHECK: # %bb.0: @@ -294,8 +276,6 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.maxnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.maxnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64: ; CHECK: # %bb.0: @@ -338,8 +316,6 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <15 x double> @llvm.vp.maxnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfmax_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v15f64: ; CHECK: # %bb.0: @@ -360,8 +336,6 @@ define <15 x double> @vfmax_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.maxnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f64: ; CHECK: # %bb.0: @@ -382,8 +356,6 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.maxnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, 
<32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll index c37df892de442..44362efa1fe83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmax_v2f16_vv: ; ZVFH: # %bb.0: @@ -81,8 +79,6 @@ define <2 x half> @vfmax_v2f16_fv(<2 x half> %a, half %b) { ret <2 x half> %v } -declare <4 x half> @llvm.maxnum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmax_v4f16_vv: ; ZVFH: # %bb.0: @@ -154,8 +150,6 @@ define <4 x half> @vfmax_v4f16_fv(<4 x half> %a, half %b) { ret <4 x half> %v } -declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmax_v8f16_vv: ; ZVFH: # %bb.0: @@ -227,8 +221,6 @@ define <8 x half> @vfmax_v8f16_fv(<8 x half> %a, half %b) { ret <8 x half> %v } -declare <16 x half> @llvm.maxnum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmax_v16f16_vv: ; ZVFH: # %bb.0: @@ -300,8 +292,6 @@ define <16 x half> @vfmax_v16f16_fv(<16 x half> %a, half %b) { ret <16 x half> %v } -declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmax_v2f32_vv: ; CHECK: # %bb.0: @@ -336,8 +326,6 @@ define <2 x float> @vfmax_v2f32_fv(<2 x float> %a, float %b) { ret <2 x float> %v } -declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x 
float>) - define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmax_v4f32_vv: ; CHECK: # %bb.0: @@ -372,8 +360,6 @@ define <4 x float> @vfmax_v4f32_fv(<4 x float> %a, float %b) { ret <4 x float> %v } -declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmax_v8f32_vv: ; CHECK: # %bb.0: @@ -408,8 +394,6 @@ define <8 x float> @vfmax_v8f32_fv(<8 x float> %a, float %b) { ret <8 x float> %v } -declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmax_v16f32_vv: ; CHECK: # %bb.0: @@ -444,8 +428,6 @@ define <16 x float> @vfmax_v16f32_fv(<16 x float> %a, float %b) { ret <16 x float> %v } -declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmax_v2f64_vv: ; CHECK: # %bb.0: @@ -480,8 +462,6 @@ define <2 x double> @vfmax_v2f64_fv(<2 x double> %a, double %b) { ret <2 x double> %v } -declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmax_v4f64_vv: ; CHECK: # %bb.0: @@ -516,8 +496,6 @@ define <4 x double> @vfmax_v4f64_fv(<4 x double> %a, double %b) { ret <4 x double> %v } -declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmax_v8f64_vv: ; CHECK: # %bb.0: @@ -552,8 +530,6 @@ define <8 x double> @vfmax_v8f64_fv(<8 x double> %a, double %b) { ret <8 x double> %v } -declare <16 x double> @llvm.maxnum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmax_v16f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll index c4a143de5cff1..56f7a8d48c5a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.minnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v2f16: ; ZVFH: # %bb.0: @@ -52,8 +50,6 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.minnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v4f16: ; ZVFH: # %bb.0: @@ -96,8 +92,6 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.minnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v8f16: ; ZVFH: # %bb.0: @@ -140,8 +134,6 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.minnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v16f16: ; ZVFH: # %bb.0: @@ -184,8 +176,6 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.minnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfmin_vv_v2f32: ; CHECK: # %bb.0: @@ -206,8 +196,6 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32: ; CHECK: # %bb.0: @@ -228,8 +216,6 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.minnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32: ; CHECK: # %bb.0: @@ -272,8 +256,6 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> @llvm.vp.minnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64: ; CHECK: # %bb.0: @@ -294,8 +276,6 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.minnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> 
@llvm.vp.minnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64: ; CHECK: # %bb.0: @@ -338,8 +316,6 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <15 x double> @llvm.vp.minnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v15f64: ; CHECK: # %bb.0: @@ -360,8 +336,6 @@ define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.minnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f64: ; CHECK: # %bb.0: @@ -382,8 +356,6 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.minnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll index 90afe36a36c0f..c9bb99d6cb3d6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmin_v2f16_vv: ; ZVFH: # %bb.0: @@ -81,8 +79,6 @@ define 
<2 x half> @vfmin_v2f16_fv(<2 x half> %a, half %b) { ret <2 x half> %v } -declare <4 x half> @llvm.minnum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmin_v4f16_vv: ; ZVFH: # %bb.0: @@ -154,8 +150,6 @@ define <4 x half> @vfmin_v4f16_fv(<4 x half> %a, half %b) { ret <4 x half> %v } -declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmin_v8f16_vv: ; ZVFH: # %bb.0: @@ -227,8 +221,6 @@ define <8 x half> @vfmin_v8f16_fv(<8 x half> %a, half %b) { ret <8 x half> %v } -declare <16 x half> @llvm.minnum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmin_v16f16_vv: ; ZVFH: # %bb.0: @@ -300,8 +292,6 @@ define <16 x half> @vfmin_v16f16_fv(<16 x half> %a, half %b) { ret <16 x half> %v } -declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmin_v2f32_vv: ; CHECK: # %bb.0: @@ -336,8 +326,6 @@ define <2 x float> @vfmin_v2f32_fv(<2 x float> %a, float %b) { ret <2 x float> %v } -declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmin_v4f32_vv: ; CHECK: # %bb.0: @@ -372,8 +360,6 @@ define <4 x float> @vfmin_v4f32_fv(<4 x float> %a, float %b) { ret <4 x float> %v } -declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmin_v8f32_vv: ; CHECK: # %bb.0: @@ -408,8 +394,6 @@ define <8 x float> @vfmin_v8f32_fv(<8 x float> %a, float %b) { ret <8 x float> %v } -declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmin_v16f32_vv: ; CHECK: # %bb.0: @@ -444,8 
+428,6 @@ define <16 x float> @vfmin_v16f32_fv(<16 x float> %a, float %b) { ret <16 x float> %v } -declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmin_v2f64_vv: ; CHECK: # %bb.0: @@ -480,8 +462,6 @@ define <2 x double> @vfmin_v2f64_fv(<2 x double> %a, double %b) { ret <2 x double> %v } -declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmin_v4f64_vv: ; CHECK: # %bb.0: @@ -516,8 +496,6 @@ define <4 x double> @vfmin_v4f64_fv(<4 x double> %a, double %b) { ret <4 x double> %v } -declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmin_v8f64_vv: ; CHECK: # %bb.0: @@ -552,8 +530,6 @@ define <8 x double> @vfmin_v8f64_fv(<8 x double> %a, double %b) { ret <8 x double> %v } -declare <16 x double> @llvm.minnum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmin_v16f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll index 99fc035235671..f8478c13b3aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfmsac_vv_v2f16(<2 x half> %a, <2 x 
half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define <2 x half> @vfmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half> ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define <4 x half> @vfmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half> ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define <8 x half> @vfmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half> ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define <16 x half> 
@vfmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x h ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfmsac_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v32f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define <32 x half> @vfmsac_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x h ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f32: ; CHECK: # %bb.0: @@ -718,11 +688,6 @@ define <2 x float> @vfmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x fl ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f32: ; CHECK: # %bb.0: @@ -837,11 +802,6 @@ define <4 x float> @vfmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x fl ret <4 x float> %u } -declare <8 x float> 
@llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f32: ; CHECK: # %bb.0: @@ -956,11 +916,6 @@ define <8 x float> @vfmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x fl ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f32: ; CHECK: # %bb.0: @@ -1075,11 +1030,6 @@ define <16 x float> @vfmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f64: ; CHECK: # %bb.0: @@ -1194,11 +1144,6 @@ define <2 x double> @vfmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 x ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare 
<4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f64: ; CHECK: # %bb.0: @@ -1313,11 +1258,6 @@ define <4 x double> @vfmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 x ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll index 268494bf337e1..fd733c8dac518 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define <2 x half> @vfmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) stri ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define <4 x half> @vfmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) stri ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define <8 x half> @vfmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) stri ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v16f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define <16 x half> @vfmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -139,8 +129,6 @@ define <32 x half> @vfmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -165,8 +153,6 @@ define <2 x float> @vfmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -191,8 +177,6 @@ define <4 x float> @vfmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -217,8 +201,6 @@ define <8 x float> @vfmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v16f32: ; CHECK: # %bb.0: @@ -243,8 +225,6 @@ define <16 x float> @vfmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @vfmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> 
@llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define <4 x double> @vfmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll index c8148a5e8d49c..f6b2327f5a41c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) strictfp { ret <1 x half> %vc } -declare <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 
x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -76,7 +73,6 @@ define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -100,7 +96,6 @@ define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -124,7 +119,6 @@ define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -150,7 +144,6 @@ define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f32: ; CHECK: # %bb.0: # %entry @@ -174,7 +167,6 @@ define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) strictfp { ret <1 x float> %vc } -declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -198,7 +190,6 @@ define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } 
-declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +213,6 @@ define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +236,6 @@ define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -270,7 +259,6 @@ define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +282,6 @@ define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) strictfp { ret <1 x double> %vc } -declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -318,7 +305,6 @@ define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfmul_vv_v4f64(<4 x 
double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -342,7 +328,6 @@ define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll index 3c0819e549552..167327fdbd571 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fmul.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fmul.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> 
@llvm.vp.fmul.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fmul.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fmul.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fmul.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fmul.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f32: ; CHECK: # %bb.0: 
@@ -599,8 +581,6 @@ define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fmul.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fmul.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fmul.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll index cc911d06d8d58..a9857880b5942 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) - define 
<2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f16: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v ret <2 x half> %v } -declare <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f16: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v ret <4 x half> %v } -declare <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f16: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v ret <8 x half> %v } -declare <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f16: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half ret <16 x half> %v } -declare <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float ret <2 x float> %v } -declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32: ; CHECK: # %bb.0: @@ -286,8 +274,6 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float ret <4 x float> %v } -declare <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float ret <8 x float> %v } -declare <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32: ; CHECK: # %bb.0: @@ -380,8 +364,6 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f ret <16 x float> %v } -declare <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64: ; CHECK: # %bb.0: @@ -427,8 +409,6 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do ret <2 x double> %v } -declare <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64: ; CHECK: # %bb.0: @@ -474,8 +454,6 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do ret <4 x double> %v } -declare <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfma_vv_v8f64: ; CHECK: # %bb.0: @@ -521,8 +499,6 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do ret <8 x double> %v } -declare <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double>, <15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: @@ -548,8 +524,6 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> % ret <15 x double> %v } -declare <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double>, <16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: @@ -599,8 +573,6 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 ret <16 x double> %v } -declare <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double>, <32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll index dede0e707d929..84a89b23bc3b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll @@ -204,8 +204,6 @@ define <16 x bfloat> @vfneg_vv_v16bf16_unmasked(<16 x bfloat> %va, i32 zeroext % ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v2f16: ; ZVFH: # %bb.0: @@ -254,8 +252,6 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } 
-declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v4f16: ; ZVFH: # %bb.0: @@ -304,8 +300,6 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v8f16: ; ZVFH: # %bb.0: @@ -354,8 +348,6 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v16f16: ; ZVFH: # %bb.0: @@ -404,8 +396,6 @@ define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfneg_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f32: ; CHECK: # %bb.0: @@ -426,8 +416,6 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfneg_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f32: ; CHECK: # %bb.0: @@ -448,8 +436,6 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfneg_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f32: ; CHECK: # %bb.0: @@ -470,8 +456,6 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x 
float>, <16 x i1>, i32) - define <16 x float> @vfneg_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f32: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfneg_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f64: ; CHECK: # %bb.0: @@ -514,8 +496,6 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfneg_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f64: ; CHECK: # %bb.0: @@ -536,8 +516,6 @@ define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfneg_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f64: ; CHECK: # %bb.0: @@ -558,8 +536,6 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.fneg.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v15f64: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.fneg.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f64: ; CHECK: # %bb.0: @@ -602,8 +576,6 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> 
@llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll index 4ab94444b1b89..3bcf7496868d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfnmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f16: ; CHECK: # %bb.0: @@ -131,11 +126,6 @@ define <2 x half> @vfnmacc_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfnmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f16: ; CHECK: # %bb.0: @@ -258,11 +248,6 @@ define <4 x half> @vfnmacc_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) 
-declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfnmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f16: ; CHECK: # %bb.0: @@ -385,11 +370,6 @@ define <8 x half> @vfnmacc_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfnmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f16: ; CHECK: # %bb.0: @@ -512,11 +492,6 @@ define <16 x half> @vfnmacc_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfnmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v32f16: ; CHECK: # %bb.0: @@ -639,11 +614,6 @@ define <32 x half> @vfnmacc_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> 
@llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfnmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -766,11 +736,6 @@ define <2 x float> @vfnmacc_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfnmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -893,11 +858,6 @@ define <4 x float> @vfnmacc_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f ret <4 x float> %u } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfnmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f32: ; CHECK: # %bb.0: @@ -1020,11 +980,6 @@ define <8 x float> @vfnmacc_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> 
@vfnmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -1147,11 +1102,6 @@ define <16 x float> @vfnmacc_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfnmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1274,11 +1224,6 @@ define <2 x double> @vfnmacc_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfnmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1401,11 +1346,6 @@ define <4 x double> @vfnmacc_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfnmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, 
<8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll index afc89717596b2..48a3c9d695b56 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -35,8 +33,6 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) str ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) str ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -91,8 +85,6 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) str ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f16: 
; CHECK: # %bb.0: @@ -119,8 +111,6 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -149,8 +139,6 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -177,8 +165,6 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -205,8 +191,6 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -233,8 +217,6 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f32: ; 
CHECK: # %bb.0: @@ -261,8 +243,6 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -289,8 +269,6 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -317,8 +295,6 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll index 4d9b002cc785c..6ecddefa70119 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> 
@vfnmsac_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define <2 x half> @vfnmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfnmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define <4 x half> @vfnmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfnmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define <8 x half> @vfnmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfnmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v16f16: ; CHECK: # %bb.0: @@ 
-480,11 +460,6 @@ define <16 x half> @vfnmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v26f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v26f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v26f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v26f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfnmsac_vv_v26f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v26f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define <32 x half> @vfnmsac_vf_v26f16_commute_ta(<32 x half> %a, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfnmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f32: ; CHECK: # %bb.0: @@ -718,11 +688,6 @@ define <2 x float> @vfnmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfnmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f32: ; CHECK: # %bb.0: @@ -837,11 +802,6 @@ define <4 x float> @vfnmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f ret <4 x float> %u 
} -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfnmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f32: ; CHECK: # %bb.0: @@ -956,11 +916,6 @@ define <8 x float> @vfnmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfnmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v16f32: ; CHECK: # %bb.0: @@ -1075,11 +1030,6 @@ define <16 x float> @vfnmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfnmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f64: ; CHECK: # %bb.0: @@ -1194,11 +1144,6 @@ define <2 x double> @vfnmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x 
double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfnmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f64: ; CHECK: # %bb.0: @@ -1313,11 +1258,6 @@ define <4 x double> @vfnmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfnmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll index d9863bb36c739..9c9ca4375faf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) str ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) str ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) str ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -139,8 +129,6 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -165,8 +153,6 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -191,8 +177,6 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -217,8 +201,6 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f32: ; CHECK: # %bb.0: @@ -243,8 +225,6 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x 
double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll index b8a6be40b3f32..91475222f7cff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata) define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v2f16_v2f32: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half>, metadata) define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v2f16_v2f64: ; CHECK: # %bb.0: @@ -29,7 +27,6 @@ define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) strictfp { ret <2 x double> %evec } -declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata) define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) strictfp { ; CHECK-LABEL: 
vfpext_v4f16_v4f32: ; CHECK: # %bb.0: @@ -41,7 +38,6 @@ define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata) define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v4f16_v4f64: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) strictfp { ret <4 x double> %evec } -declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata) define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v8f16_v8f32: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half>, metadata) define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v8f16_v8f64: ; CHECK: # %bb.0: @@ -79,7 +73,6 @@ define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) strictfp { ret <8 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata) define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v2f32_v2f64: ; CHECK: # %bb.0: @@ -91,7 +84,6 @@ define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) strictfp { ret <2 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata) define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v4f32_v4f64: ; CHECK: # %bb.0: @@ -103,7 +95,6 @@ define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) strictfp { ret <4 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata) define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v8f32_v8f64: ; CHECK: # %bb.0: @@ -115,7 +106,6 @@ 
define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) strictfp { ret <8 x double> %evec } -declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2bf16(<2 x bfloat>, metadata) define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -127,7 +117,6 @@ define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2bf16(<2 x bfloat>, metadata) define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v2bf16_v2f64: ; CHECK: # %bb.0: @@ -140,7 +129,6 @@ define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %va) strictfp { ret <2 x double> %evec } -declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4bf16(<4 x bfloat>, metadata) define <4 x float> @vfpext_v4bf16_v4f32(<4 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v4bf16_v4f32: ; CHECK: # %bb.0: @@ -152,7 +140,6 @@ define <4 x float> @vfpext_v4bf16_v4f32(<4 x bfloat> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4bf16(<4 x bfloat>, metadata) define <4 x double> @vfpext_v4bf16_v4f64(<4 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v4bf16_v4f64: ; CHECK: # %bb.0: @@ -165,7 +152,6 @@ define <4 x double> @vfpext_v4bf16_v4f64(<4 x bfloat> %va) strictfp { ret <4 x double> %evec } -declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8bf16(<8 x bfloat>, metadata) define <8 x float> @vfpext_v8bf16_v8f32(<8 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v8bf16_v8f32: ; CHECK: # %bb.0: @@ -177,7 +163,6 @@ define <8 x float> @vfpext_v8bf16_v8f32(<8 x bfloat> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8bf16(<8 x bfloat>, metadata) define <8 x double> @vfpext_v8bf16_v8f64(<8 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v8bf16_v8f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll index ac58a597a0812..77a67f1619dd0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half>, metadata) define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i1: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f16(<1 x half>, metadata) define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i1: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, metadata) define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp { ; RV32-LABEL: vfptosi_v1f16_v1i7: ; RV32: # %bb.0: @@ -49,7 +46,6 @@ define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp { ret <1 x i7> %evec } -declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, metadata) define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp { ; RV32-LABEL: vfptoui_v1f16_v1i7: ; RV32: # %bb.0: @@ -68,7 +64,6 @@ define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp { ret <1 x i7> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f16(<1 x half>, metadata) define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i8: ; CHECK: # %bb.0: @@ -80,7 +75,6 @@ define <1 x i8> @vfptosi_v1f16_v1i8(<1 x 
half> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f16(<1 x half>, metadata) define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i8: ; CHECK: # %bb.0: @@ -92,7 +86,6 @@ define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f16(<1 x half>, metadata) define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i16: ; CHECK: # %bb.0: @@ -103,7 +96,6 @@ define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f16(<1 x half>, metadata) define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i16: ; CHECK: # %bb.0: @@ -114,7 +106,6 @@ define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f16(<1 x half>, metadata) define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i32: ; CHECK: # %bb.0: @@ -126,7 +117,6 @@ define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f16(<1 x half>, metadata) define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i32: ; CHECK: # %bb.0: @@ -138,7 +128,6 @@ define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half>, metadata) define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i64: ; CHECK: # %bb.0: @@ -151,7 +140,6 @@ define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> 
@llvm.experimental.constrained.fptoui.v1i64.v1f16(<1 x half>, metadata) define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i64: ; CHECK: # %bb.0: @@ -164,7 +152,6 @@ define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half>, metadata) define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i1: ; CHECK: # %bb.0: @@ -177,7 +164,6 @@ define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half>, metadata) define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i1: ; CHECK: # %bb.0: @@ -190,7 +176,6 @@ define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half>, metadata) define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i8: ; CHECK: # %bb.0: @@ -202,7 +187,6 @@ define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half>, metadata) define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i8: ; CHECK: # %bb.0: @@ -214,7 +198,6 @@ define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half>, metadata) define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i16: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half>, metadata) define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) 
strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i16: ; CHECK: # %bb.0: @@ -236,7 +218,6 @@ define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half>, metadata) define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i32: ; CHECK: # %bb.0: @@ -248,7 +229,6 @@ define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half>, metadata) define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i32: ; CHECK: # %bb.0: @@ -260,7 +240,6 @@ define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata) define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i64: ; CHECK: # %bb.0: @@ -273,7 +252,6 @@ define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata) define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i64: ; CHECK: # %bb.0: @@ -286,7 +264,6 @@ define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half>, metadata) define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i1: ; CHECK: # %bb.0: @@ -299,7 +276,6 @@ define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half>, metadata) define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i1: ; CHECK: # %bb.0: @@ -312,7 +288,6 @@ define <4 x i1> 
@vfptoui_v4f16_v4i1(<4 x half> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half>, metadata) define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i8: ; CHECK: # %bb.0: @@ -324,7 +299,6 @@ define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half>, metadata) define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i8: ; CHECK: # %bb.0: @@ -336,7 +310,6 @@ define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half>, metadata) define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i16: ; CHECK: # %bb.0: @@ -347,7 +320,6 @@ define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half>, metadata) define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i16: ; CHECK: # %bb.0: @@ -358,7 +330,6 @@ define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half>, metadata) define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i32: ; CHECK: # %bb.0: @@ -370,7 +341,6 @@ define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half>, metadata) define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i32: ; CHECK: # %bb.0: @@ -382,7 +352,6 @@ define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> 
@llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half>, metadata) define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i64: ; CHECK: # %bb.0: @@ -395,7 +364,6 @@ define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half>, metadata) define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i64: ; CHECK: # %bb.0: @@ -408,7 +376,6 @@ define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half>, metadata) define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i1: ; CHECK: # %bb.0: @@ -421,7 +388,6 @@ define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half>, metadata) define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i1: ; CHECK: # %bb.0: @@ -434,7 +400,6 @@ define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half>, metadata) define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i8: ; CHECK: # %bb.0: @@ -446,7 +411,6 @@ define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half>, metadata) define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i8: ; CHECK: # %bb.0: @@ -458,7 +422,6 @@ define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half>, metadata) define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) 
strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i16: ; CHECK: # %bb.0: @@ -469,7 +432,6 @@ define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half>, metadata) define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i16: ; CHECK: # %bb.0: @@ -480,7 +442,6 @@ define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half>, metadata) define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i32: ; CHECK: # %bb.0: @@ -492,7 +453,6 @@ define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half>, metadata) define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i32: ; CHECK: # %bb.0: @@ -504,7 +464,6 @@ define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f16(<8 x half>, metadata) define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i64: ; CHECK: # %bb.0: @@ -517,7 +476,6 @@ define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f16(<8 x half>, metadata) define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i64: ; CHECK: # %bb.0: @@ -530,7 +488,6 @@ define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) strictfp { ret <8 x i64> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half>, metadata) define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i1: ; CHECK: # %bb.0: @@ -543,7 +500,6 @@ define 
<16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half>, metadata) define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i1: ; CHECK: # %bb.0: @@ -556,7 +512,6 @@ define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half>, metadata) define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i8: ; CHECK: # %bb.0: @@ -568,7 +523,6 @@ define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half>, metadata) define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i8: ; CHECK: # %bb.0: @@ -580,7 +534,6 @@ define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half>, metadata) define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i16: ; CHECK: # %bb.0: @@ -591,7 +544,6 @@ define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half>, metadata) define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i16: ; CHECK: # %bb.0: @@ -602,7 +554,6 @@ define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f16(<16 x half>, metadata) define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i32: ; CHECK: # %bb.0: @@ -614,7 +565,6 @@ define <16 x i32> 
@vfptosi_v16f16_v16i32(<16 x half> %va) strictfp { ret <16 x i32> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f16(<16 x half>, metadata) define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i32: ; CHECK: # %bb.0: @@ -626,7 +576,6 @@ define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) strictfp { ret <16 x i32> %evec } -declare <32 x i1> @llvm.experimental.constrained.fptosi.v32i1.v32f16(<32 x half>, metadata) define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i1: ; CHECK: # %bb.0: @@ -640,7 +589,6 @@ define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) strictfp { ret <32 x i1> %evec } -declare <32 x i1> @llvm.experimental.constrained.fptoui.v32i1.v32f16(<32 x half>, metadata) define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i1: ; CHECK: # %bb.0: @@ -654,7 +602,6 @@ define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) strictfp { ret <32 x i1> %evec } -declare <32 x i8> @llvm.experimental.constrained.fptosi.v32i8.v32f16(<32 x half>, metadata) define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i8: ; CHECK: # %bb.0: @@ -667,7 +614,6 @@ define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) strictfp { ret <32 x i8> %evec } -declare <32 x i8> @llvm.experimental.constrained.fptoui.v32i8.v32f16(<32 x half>, metadata) define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i8: ; CHECK: # %bb.0: @@ -680,7 +626,6 @@ define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) strictfp { ret <32 x i8> %evec } -declare <32 x i16> @llvm.experimental.constrained.fptosi.v32i16.v32f16(<32 x half>, metadata) define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i16: ; CHECK: # %bb.0: @@ -692,7 +637,6 @@ define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) 
strictfp { ret <32 x i16> %evec } -declare <32 x i16> @llvm.experimental.constrained.fptoui.v32i16.v32f16(<32 x half>, metadata) define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i16: ; CHECK: # %bb.0: @@ -704,7 +648,6 @@ define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) strictfp { ret <32 x i16> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f32(<1 x float>, metadata) define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i1: ; CHECK: # %bb.0: @@ -717,7 +660,6 @@ define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f32(<1 x float>, metadata) define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i1: ; CHECK: # %bb.0: @@ -730,7 +672,6 @@ define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float>, metadata) define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i8: ; CHECK: # %bb.0: @@ -743,7 +684,6 @@ define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f32(<1 x float>, metadata) define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i8: ; CHECK: # %bb.0: @@ -756,7 +696,6 @@ define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f32(<1 x float>, metadata) define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i16: ; CHECK: # %bb.0: @@ -768,7 +707,6 @@ define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> 
@llvm.experimental.constrained.fptoui.v1i16.v1f32(<1 x float>, metadata) define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i16: ; CHECK: # %bb.0: @@ -780,7 +718,6 @@ define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata) define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i32: ; CHECK: # %bb.0: @@ -791,7 +728,6 @@ define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata) define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i32: ; CHECK: # %bb.0: @@ -802,7 +738,6 @@ define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata) define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i64: ; CHECK: # %bb.0: @@ -814,7 +749,6 @@ define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata) define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i64: ; CHECK: # %bb.0: @@ -826,7 +760,6 @@ define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float>, metadata) define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i1: ; CHECK: # %bb.0: @@ -839,7 +772,6 @@ define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float>, metadata) define <2 x i1> 
@vfptoui_v2f32_v2i1(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i1: ; CHECK: # %bb.0: @@ -852,7 +784,6 @@ define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float>, metadata) define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i8: ; CHECK: # %bb.0: @@ -865,7 +796,6 @@ define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float>, metadata) define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i8: ; CHECK: # %bb.0: @@ -878,7 +808,6 @@ define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float>, metadata) define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i16: ; CHECK: # %bb.0: @@ -890,7 +819,6 @@ define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float>, metadata) define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i16: ; CHECK: # %bb.0: @@ -902,7 +830,6 @@ define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata) define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i32: ; CHECK: # %bb.0: @@ -913,7 +840,6 @@ define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata) define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i32: ; CHECK: # 
%bb.0: @@ -924,7 +850,6 @@ define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata) define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i64: ; CHECK: # %bb.0: @@ -936,7 +861,6 @@ define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata) define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i64: ; CHECK: # %bb.0: @@ -948,7 +872,6 @@ define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float>, metadata) define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i1: ; CHECK: # %bb.0: @@ -961,7 +884,6 @@ define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float>, metadata) define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i1: ; CHECK: # %bb.0: @@ -974,7 +896,6 @@ define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float>, metadata) define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i8: ; CHECK: # %bb.0: @@ -987,7 +908,6 @@ define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float>, metadata) define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i8: ; CHECK: # %bb.0: @@ -1000,7 +920,6 @@ define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) strictfp { ret <4 x i8> 
%evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f32(<4 x float>, metadata) define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i16: ; CHECK: # %bb.0: @@ -1012,7 +931,6 @@ define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f32(<4 x float>, metadata) define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i16: ; CHECK: # %bb.0: @@ -1024,7 +942,6 @@ define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata) define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i32: ; CHECK: # %bb.0: @@ -1035,7 +952,6 @@ define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata) define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i32: ; CHECK: # %bb.0: @@ -1046,7 +962,6 @@ define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata) define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i64: ; CHECK: # %bb.0: @@ -1058,7 +973,6 @@ define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata) define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i64: ; CHECK: # %bb.0: @@ -1070,7 +984,6 @@ define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> 
@llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float>, metadata) define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i1: ; CHECK: # %bb.0: @@ -1083,7 +996,6 @@ define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float>, metadata) define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i1: ; CHECK: # %bb.0: @@ -1096,7 +1008,6 @@ define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float>, metadata) define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i8: ; CHECK: # %bb.0: @@ -1109,7 +1020,6 @@ define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float>, metadata) define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i8: ; CHECK: # %bb.0: @@ -1122,7 +1032,6 @@ define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float>, metadata) define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i16: ; CHECK: # %bb.0: @@ -1134,7 +1043,6 @@ define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float>, metadata) define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i16: ; CHECK: # %bb.0: @@ -1146,7 +1054,6 @@ define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float>, metadata) define <8 x i32> 
@vfptosi_v8f32_v8i32(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i32: ; CHECK: # %bb.0: @@ -1157,7 +1064,6 @@ define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float>, metadata) define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i32: ; CHECK: # %bb.0: @@ -1168,7 +1074,6 @@ define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float>, metadata) define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i64: ; CHECK: # %bb.0: @@ -1180,7 +1085,6 @@ define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float>, metadata) define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i64: ; CHECK: # %bb.0: @@ -1192,7 +1096,6 @@ define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) strictfp { ret <8 x i64> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float>, metadata) define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i1: ; CHECK: # %bb.0: @@ -1205,7 +1108,6 @@ define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float>, metadata) define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i1: ; CHECK: # %bb.0: @@ -1218,7 +1120,6 @@ define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float>, metadata) define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) strictfp { 
; CHECK-LABEL: vfptosi_v16f32_v16i8: ; CHECK: # %bb.0: @@ -1231,7 +1132,6 @@ define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float>, metadata) define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i8: ; CHECK: # %bb.0: @@ -1244,7 +1144,6 @@ define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float>, metadata) define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i16: ; CHECK: # %bb.0: @@ -1256,7 +1155,6 @@ define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float>, metadata) define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i16: ; CHECK: # %bb.0: @@ -1268,7 +1166,6 @@ define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata) define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i32: ; CHECK: # %bb.0: @@ -1279,7 +1176,6 @@ define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) strictfp { ret <16 x i32> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float>, metadata) define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i32: ; CHECK: # %bb.0: @@ -1290,7 +1186,6 @@ define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) strictfp { ret <16 x i32> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f64(<1 x double>, metadata) define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) strictfp { 
; CHECK-LABEL: vfptosi_v1f64_v1i1: ; CHECK: # %bb.0: @@ -1303,7 +1198,6 @@ define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f64(<1 x double>, metadata) define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i1: ; CHECK: # %bb.0: @@ -1316,7 +1210,6 @@ define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f64(<1 x double>, metadata) define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i8: ; CHECK: # %bb.0: @@ -1331,7 +1224,6 @@ define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f64(<1 x double>, metadata) define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i8: ; CHECK: # %bb.0: @@ -1346,7 +1238,6 @@ define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f64(<1 x double>, metadata) define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i16: ; CHECK: # %bb.0: @@ -1359,7 +1250,6 @@ define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f64(<1 x double>, metadata) define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i16: ; CHECK: # %bb.0: @@ -1372,7 +1262,6 @@ define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata) define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i32: ; CHECK: # %bb.0: @@ -1384,7 +1273,6 @@ 
define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata) define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i32: ; CHECK: # %bb.0: @@ -1396,7 +1284,6 @@ define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata) define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i64: ; CHECK: # %bb.0: @@ -1407,7 +1294,6 @@ define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata) define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i64: ; CHECK: # %bb.0: @@ -1418,7 +1304,6 @@ define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double>, metadata) define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i1: ; CHECK: # %bb.0: @@ -1431,7 +1316,6 @@ define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double>, metadata) define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i1: ; CHECK: # %bb.0: @@ -1444,7 +1328,6 @@ define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double>, metadata) define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i8: ; CHECK: # %bb.0: @@ -1459,7 +1342,6 @@ define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) strictfp { ret 
<2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double>, metadata) define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i8: ; CHECK: # %bb.0: @@ -1474,7 +1356,6 @@ define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double>, metadata) define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i16: ; CHECK: # %bb.0: @@ -1487,7 +1368,6 @@ define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double>, metadata) define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i16: ; CHECK: # %bb.0: @@ -1500,7 +1380,6 @@ define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata) define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i32: ; CHECK: # %bb.0: @@ -1512,7 +1391,6 @@ define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata) define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i32: ; CHECK: # %bb.0: @@ -1524,7 +1402,6 @@ define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata) define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i64: ; CHECK: # %bb.0: @@ -1535,7 +1412,6 @@ define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> 
@llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata) define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i64: ; CHECK: # %bb.0: @@ -1546,7 +1422,6 @@ define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double>, metadata) define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i1: ; CHECK: # %bb.0: @@ -1559,7 +1434,6 @@ define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double>, metadata) define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i1: ; CHECK: # %bb.0: @@ -1572,7 +1446,6 @@ define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double>, metadata) define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i8: ; CHECK: # %bb.0: @@ -1587,7 +1460,6 @@ define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double>, metadata) define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i8: ; CHECK: # %bb.0: @@ -1602,7 +1474,6 @@ define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double>, metadata) define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i16: ; CHECK: # %bb.0: @@ -1615,7 +1486,6 @@ define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double>, metadata) define 
<4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i16: ; CHECK: # %bb.0: @@ -1628,7 +1498,6 @@ define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata) define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i32: ; CHECK: # %bb.0: @@ -1640,7 +1509,6 @@ define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata) define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i32: ; CHECK: # %bb.0: @@ -1652,7 +1520,6 @@ define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata) define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i64: ; CHECK: # %bb.0: @@ -1663,7 +1530,6 @@ define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata) define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i64: ; CHECK: # %bb.0: @@ -1674,7 +1540,6 @@ define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double>, metadata) define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i1: ; CHECK: # %bb.0: @@ -1687,7 +1552,6 @@ define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double>, metadata) define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) strictfp { ; 
CHECK-LABEL: vfptoui_v8f64_v8i1: ; CHECK: # %bb.0: @@ -1700,7 +1564,6 @@ define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double>, metadata) define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i8: ; CHECK: # %bb.0: @@ -1715,7 +1578,6 @@ define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double>, metadata) define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i8: ; CHECK: # %bb.0: @@ -1730,7 +1592,6 @@ define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double>, metadata) define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i16: ; CHECK: # %bb.0: @@ -1743,7 +1604,6 @@ define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double>, metadata) define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i16: ; CHECK: # %bb.0: @@ -1756,7 +1616,6 @@ define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata) define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i32: ; CHECK: # %bb.0: @@ -1768,7 +1627,6 @@ define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double>, metadata) define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i32: ; CHECK: # %bb.0: @@ -1780,7 
+1638,6 @@ define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata) define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i64: ; CHECK: # %bb.0: @@ -1791,7 +1648,6 @@ define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata) define <8 x i64> @vfptoui_v8f64_v8i64(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll index 1f74691437ad2..1aa8d5509d191 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll @@ -8,7 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata) define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2f32: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp { ret <2 x float> %evec } -declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata) define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2f16: ; CHECK: # %bb.0: @@ -33,7 +31,6 @@ define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata) define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp { 
; CHECK-LABEL: vfptrunc_v2f32_v2f16: ; CHECK: # %bb.0: @@ -45,7 +42,6 @@ define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp { ret <2 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata) define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4f32: ; CHECK: # %bb.0: @@ -57,7 +53,6 @@ define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp { ret <4 x float> %evec } -declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata) define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4f16: ; CHECK: # %bb.0: @@ -70,7 +65,6 @@ define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata) define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f32_v4f16: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp { ret <4 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata) define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8f32: ; CHECK: # %bb.0: @@ -94,7 +87,6 @@ define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp { ret <8 x float> %evec } -declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata) define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8f16: ; CHECK: # %bb.0: @@ -107,7 +99,6 @@ define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata) define <8 x half> 
@vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f32_v8f16: ; CHECK: # %bb.0: @@ -119,7 +110,6 @@ define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp { ret <8 x half> %evec } -declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f64(<2 x double>, metadata, metadata) define <2 x bfloat> @vfptrunc_v2f64_v2bf16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2bf16: ; CHECK: # %bb.0: @@ -132,7 +122,6 @@ define <2 x bfloat> @vfptrunc_v2f64_v2bf16(<2 x double> %va) strictfp { ret <2 x bfloat> %evec } -declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f32(<2 x float>, metadata, metadata) define <2 x bfloat> @vfptrunc_v2f32_v2bf16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f32_v2bf16: ; CHECK: # %bb.0: @@ -144,7 +133,6 @@ define <2 x bfloat> @vfptrunc_v2f32_v2bf16(<2 x float> %va) strictfp { ret <2 x bfloat> %evec } -declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f64(<4 x double>, metadata, metadata) define <4 x bfloat> @vfptrunc_v4f64_v4bf16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4bf16: ; CHECK: # %bb.0: @@ -157,7 +145,6 @@ define <4 x bfloat> @vfptrunc_v4f64_v4bf16(<4 x double> %va) strictfp { ret <4 x bfloat> %evec } -declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f32(<4 x float>, metadata, metadata) define <4 x bfloat> @vfptrunc_v4f32_v4bf16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f32_v4bf16: ; CHECK: # %bb.0: @@ -169,7 +156,6 @@ define <4 x bfloat> @vfptrunc_v4f32_v4bf16(<4 x float> %va) strictfp { ret <4 x bfloat> %evec } -declare <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f64(<8 x double>, metadata, metadata) define <8 x bfloat> @vfptrunc_v8f64_v8bf16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8bf16: ; CHECK: # %bb.0: @@ -182,7 +168,6 @@ define <8 x bfloat> @vfptrunc_v8f64_v8bf16(<8 x double> %va) strictfp { ret <8 x bfloat> %evec } -declare <8 x bfloat> 
@llvm.experimental.constrained.fptrunc.v8bf16.v8f32(<8 x float>, metadata, metadata) define <8 x bfloat> @vfptrunc_v8f32_v8bf16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f32_v8bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll index fb813d4381a7d..52e09f6f10a87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero 
ret <16 x half> %v } -declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f32: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero ret <2 x float> %v } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f32: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero ret <4 x float> %v } -declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero ret <8 x float> %v } -declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z ret <16 x float> %v } -declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f64: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define <2 x double> @vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z ret <2 x double> %v } -declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfrdiv_vf_v4f64: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z ret <4 x double> %v } -declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f64: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z ret <8 x double> %v } -declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll index 63c2d1f2e7db3..30b840a2f6b4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfrsub_vf_v8f16(<8 x half> 
%va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x half> @vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero ret <16 x half> %v } -declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f32: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero ret <2 x float> %v } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f32: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero ret <4 x float> %v } -declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero ret <8 x float> %v } -declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define <16 x float> @vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z ret <16 x float> %v } -declare 
<2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f64: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z ret <2 x double> %v } -declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f64: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z ret <4 x double> %v } -declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f64: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z ret <8 x double> %v } -declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll index 62d03e1ab588a..bdc061ef1732b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half>, metadata, metadata) - define <2 x half> @vfsqrt_v2f16(<2 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f16: ; CHECK: # %bb.0: 
@@ -16,8 +14,6 @@ define <2 x half> @vfsqrt_v2f16(<2 x half> %v) strictfp { ret <2 x half> %r } -declare <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half>, metadata, metadata) - define <4 x half> @vfsqrt_v4f16(<4 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x half> @vfsqrt_v4f16(<4 x half> %v) strictfp { ret <4 x half> %r } -declare <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half>, metadata, metadata) - define <8 x half> @vfsqrt_v8f16(<8 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x half> @vfsqrt_v8f16(<8 x half> %v) strictfp { ret <8 x half> %r } -declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata) - define <16 x half> @vfsqrt_v16f16(<16 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v16f16: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x half> @vfsqrt_v16f16(<16 x half> %v) strictfp { ret <16 x half> %r } -declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata) - define <32 x half> @vfsqrt_v32f16(<32 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v32f16: ; CHECK: # %bb.0: @@ -65,8 +55,6 @@ define <32 x half> @vfsqrt_v32f16(<32 x half> %v) strictfp { ret <32 x half> %r } -declare <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float>, metadata, metadata) - define <2 x float> @vfsqrt_v2f32(<2 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f32: ; CHECK: # %bb.0: @@ -77,8 +65,6 @@ define <2 x float> @vfsqrt_v2f32(<2 x float> %v) strictfp { ret <2 x float> %r } -declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata) - define <4 x float> @vfsqrt_v4f32(<4 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f32: ; CHECK: # %bb.0: @@ -89,8 +75,6 @@ define <4 x float> @vfsqrt_v4f32(<4 x float> %v) strictfp { ret <4 x float> %r } -declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, 
metadata) - define <8 x float> @vfsqrt_v8f32(<8 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f32: ; CHECK: # %bb.0: @@ -101,8 +85,6 @@ define <8 x float> @vfsqrt_v8f32(<8 x float> %v) strictfp { ret <8 x float> %r } -declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata) - define <16 x float> @vfsqrt_v16f32(<16 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v16f32: ; CHECK: # %bb.0: @@ -113,8 +95,6 @@ define <16 x float> @vfsqrt_v16f32(<16 x float> %v) strictfp { ret <16 x float> %r } -declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata) - define <2 x double> @vfsqrt_v2f64(<2 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f64: ; CHECK: # %bb.0: @@ -125,8 +105,6 @@ define <2 x double> @vfsqrt_v2f64(<2 x double> %v) strictfp { ret <2 x double> %r } -declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata) - define <4 x double> @vfsqrt_v4f64(<4 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f64: ; CHECK: # %bb.0: @@ -137,8 +115,6 @@ define <4 x double> @vfsqrt_v4f64(<4 x double> %v) strictfp { ret <4 x double> %r } -declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata) - define <8 x double> @vfsqrt_v8f64(<8 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll index 6244419de65b1..b431d4873fa1b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.sqrt.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfsqrt_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfsqrt_vv_v2f16: ; ZVFH: # %bb.0: @@ -50,8 +48,6 @@ define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.sqrt.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfsqrt_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v4f16: ; ZVFH: # %bb.0: @@ -92,8 +88,6 @@ define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.sqrt.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfsqrt_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v8f16: ; ZVFH: # %bb.0: @@ -134,8 +128,6 @@ define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.sqrt.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfsqrt_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v16f16: ; ZVFH: # %bb.0: @@ -176,8 +168,6 @@ define <16 x half> @vfsqrt_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.sqrt.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfsqrt_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f32: ; CHECK: # %bb.0: @@ -198,8 +188,6 @@ define <2 x float> @vfsqrt_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) ret <2 x float> %v } -declare <4 x float> @llvm.vp.sqrt.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfsqrt_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f32: ; CHECK: # %bb.0: @@ -220,8 +208,6 @@ define <4 x float> @vfsqrt_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <8 x float> @llvm.vp.sqrt.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfsqrt_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f32: ; CHECK: # %bb.0: @@ -242,8 
+228,6 @@ define <8 x float> @vfsqrt_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) ret <8 x float> %v } -declare <16 x float> @llvm.vp.sqrt.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vfsqrt_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f32: ; CHECK: # %bb.0: @@ -264,8 +248,6 @@ define <16 x float> @vfsqrt_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %ev ret <16 x float> %v } -declare <2 x double> @llvm.vp.sqrt.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfsqrt_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f64: ; CHECK: # %bb.0: @@ -286,8 +268,6 @@ define <2 x double> @vfsqrt_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl ret <2 x double> %v } -declare <4 x double> @llvm.vp.sqrt.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfsqrt_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f64: ; CHECK: # %bb.0: @@ -308,8 +288,6 @@ define <4 x double> @vfsqrt_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl ret <4 x double> %v } -declare <8 x double> @llvm.vp.sqrt.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfsqrt_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f64: ; CHECK: # %bb.0: @@ -330,8 +308,6 @@ define <8 x double> @vfsqrt_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl ret <8 x double> %v } -declare <15 x double> @llvm.vp.sqrt.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfsqrt_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v15f64: ; CHECK: # %bb.0: @@ -352,8 +328,6 @@ define <15 x double> @vfsqrt_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext % ret <15 x double> %v } -declare <16 x double> @llvm.vp.sqrt.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfsqrt_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f64: ; 
CHECK: # %bb.0: @@ -374,8 +348,6 @@ define <16 x double> @vfsqrt_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext % ret <16 x double> %v } -declare <32 x double> @llvm.vp.sqrt.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll index e6001352a237b..32a0d2407c955 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -88,7 +85,6 @@ define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, 
metadata) define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -112,7 +108,6 @@ define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -138,7 +133,6 @@ define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -162,7 +156,6 @@ define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } -declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -186,7 +179,6 @@ define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +214,6 @@ define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +237,6 @@ define 
<16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +260,6 @@ define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +283,6 @@ define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll index 0f3a6de4f4a90..6299d5d86acdd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> 
@vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x 
float> %v } -declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f32: ; CHECK: # %bb.0: @@ -599,8 +581,6 @@ define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll index a9e9b757f372e..b5f844b7c7e6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x float> @llvm.fma.v1f32(<1 x float>, <1 x float>, <1 x float>) - define <1 x float> @vfwmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v1f32: ; CHECK: # %bb.0: @@ -149,8 +147,6 @@ define <1 x float> @vfwnmsac_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) ret <1 x float> %vg } -declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) - define <2 x float> @vfwmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -294,9 +290,6 @@ define <2 x float> @vfwnmsac_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) ret <2 x float> %vg } - -declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) - define <4 x float> @vfwmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -440,8 +433,6 @@ define <4 x float> @vfwnmsac_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) ret <4 x float> %vg } -declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) - define <8 x float> @vfwmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v8f32: ; CHECK: # %bb.0: @@ -585,8 +576,6 @@ define <8 x float> @vfwnmsac_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) ret <8 x float> %vg } -declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) - define <16 x float> @vfwmacc_vv_v16f32(<16 x float> %va, <16 x 
half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -730,8 +719,6 @@ define <16 x float> @vfwnmsac_fv_v16f32(<16 x float> %va, <16 x half> %vb, half ret <16 x float> %vg } -declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>) - define <1 x double> @vfwmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v1f64: ; CHECK: # %bb.0: @@ -875,8 +862,6 @@ define <1 x double> @vfwnmsac_fv_v1f64(<1 x double> %va, <1 x float> %vb, float ret <1 x double> %vg } -declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) - define <2 x double> @vfwmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1020,9 +1005,6 @@ define <2 x double> @vfwnmsac_fv_v2f64(<2 x double> %va, <2 x float> %vb, float ret <2 x double> %vg } - -declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) - define <4 x double> @vfwmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1166,8 +1148,6 @@ define <4 x double> @vfwnmsac_fv_v4f64(<4 x double> %va, <4 x float> %vb, float ret <4 x double> %vg } -declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) - define <8 x double> @vfwmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v8f64: ; CHECK: # %bb.0: @@ -1669,7 +1649,6 @@ define <2 x double> @vfwnmsac_fv_v2f64_v2f16(<2 x double> %va, <2 x half> %vb, h ret <2 x double> %vg } - define <4 x double> @vfwmacc_vv_v4f64_v4f16(<4 x double> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f64_v4f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll index ce5483e3ae8cc..8b436d080f065 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata) define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f16: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata) define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f16: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata) define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f32: ; CHECK: # %bb.0: @@ -43,7 +40,6 @@ define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata) define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f32: ; CHECK: # %bb.0: @@ -56,7 +52,6 @@ define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata) define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f64: ; CHECK: # %bb.0: @@ -69,7 +64,6 @@ define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 
x i1>, metadata, metadata) define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f64: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata) define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f16: ; CHECK: # %bb.0: @@ -95,7 +88,6 @@ define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata) define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f16: ; CHECK: # %bb.0: @@ -108,7 +100,6 @@ define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata) define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f32: ; CHECK: # %bb.0: @@ -121,7 +112,6 @@ define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata) define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f32: ; CHECK: # %bb.0: @@ -134,7 +124,6 @@ define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata) define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f64: ; CHECK: # %bb.0: @@ -147,7 +136,6 @@ define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata) define <2 x double> 
@vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f64: ; CHECK: # %bb.0: @@ -160,7 +148,6 @@ define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata) define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f16: ; CHECK: # %bb.0: @@ -173,7 +160,6 @@ define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata) define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i1_v4f16: ; CHECK: # %bb.0: @@ -186,7 +172,6 @@ define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata) define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f32: ; CHECK: # %bb.0: @@ -199,7 +184,6 @@ define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata) define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i1_v4f32: ; CHECK: # %bb.0: @@ -212,7 +196,6 @@ define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata) define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f64: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata) define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ; 
CHECK-LABEL: vuitofp_v4i1_v4f64: ; CHECK: # %bb.0: @@ -238,7 +220,6 @@ define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata) define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f16: ; CHECK: # %bb.0: @@ -251,7 +232,6 @@ define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata) define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f16: ; CHECK: # %bb.0: @@ -264,7 +244,6 @@ define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata) define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f32: ; CHECK: # %bb.0: @@ -277,7 +256,6 @@ define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata) define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f32: ; CHECK: # %bb.0: @@ -290,7 +268,6 @@ define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata) define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f64: ; CHECK: # %bb.0: @@ -303,7 +280,6 @@ define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata) define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f64: ; CHECK: # 
%bb.0: @@ -316,7 +292,6 @@ define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata) define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i1_v16f16: ; CHECK: # %bb.0: @@ -329,7 +304,6 @@ define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata) define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i1_v16f16: ; CHECK: # %bb.0: @@ -342,7 +316,6 @@ define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata) define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i1_v16f32: ; CHECK: # %bb.0: @@ -355,7 +328,6 @@ define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata) define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i1_v16f32: ; CHECK: # %bb.0: @@ -368,7 +340,6 @@ define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ret <16 x float> %evec } -declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata) define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i1_v32f16: ; CHECK: # %bb.0: @@ -382,7 +353,6 @@ define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata) define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ; CHECK-LABEL: 
vuitofp_v32i1_v32f16: ; CHECK: # %bb.0: @@ -396,7 +366,6 @@ define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata) define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f16: ; CHECK: # %bb.0: @@ -408,7 +377,6 @@ define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata) define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ; RV32-LABEL: vsitofp_v1i7_v1f16: ; RV32: # %bb.0: @@ -431,7 +399,6 @@ define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata) define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i7_v1f16: ; CHECK: # %bb.0: @@ -444,7 +411,6 @@ define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata) define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f16: ; CHECK: # %bb.0: @@ -456,7 +422,6 @@ define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata) define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f32: ; CHECK: # %bb.0: @@ -468,7 +433,6 @@ define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata) define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f32: ; CHECK: # %bb.0: @@ -480,7 +444,6 @@ 
define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata) define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f64: ; CHECK: # %bb.0: @@ -492,7 +455,6 @@ define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata) define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f64: ; CHECK: # %bb.0: @@ -504,7 +466,6 @@ define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata) define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f16: ; CHECK: # %bb.0: @@ -516,7 +477,6 @@ define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata) define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f16: ; CHECK: # %bb.0: @@ -528,7 +488,6 @@ define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata) define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f32: ; CHECK: # %bb.0: @@ -540,7 +499,6 @@ define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata) define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f32: ; CHECK: # %bb.0: @@ -552,7 +510,6 @@ define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> 
%va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata) define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f64: ; CHECK: # %bb.0: @@ -564,7 +521,6 @@ define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata) define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f64: ; CHECK: # %bb.0: @@ -576,7 +532,6 @@ define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata) define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f16: ; CHECK: # %bb.0: @@ -588,7 +543,6 @@ define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata) define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f16: ; CHECK: # %bb.0: @@ -600,7 +554,6 @@ define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata) define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f32: ; CHECK: # %bb.0: @@ -612,7 +565,6 @@ define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata) define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f32: ; CHECK: # %bb.0: @@ -624,7 +576,6 @@ define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ret <4 x float> %evec } -declare 
<4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata) define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f64: ; CHECK: # %bb.0: @@ -636,7 +587,6 @@ define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata) define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f64: ; CHECK: # %bb.0: @@ -648,7 +598,6 @@ define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata) define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f16: ; CHECK: # %bb.0: @@ -660,7 +609,6 @@ define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata) define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f16: ; CHECK: # %bb.0: @@ -672,7 +620,6 @@ define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata) define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f32: ; CHECK: # %bb.0: @@ -684,7 +631,6 @@ define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata) define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f32: ; CHECK: # %bb.0: @@ -696,7 +642,6 @@ define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> 
@llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata) define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f64: ; CHECK: # %bb.0: @@ -708,7 +653,6 @@ define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata) define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f64: ; CHECK: # %bb.0: @@ -720,7 +664,6 @@ define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata) define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i8_v16f16: ; CHECK: # %bb.0: @@ -732,7 +675,6 @@ define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata) define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i8_v16f16: ; CHECK: # %bb.0: @@ -744,7 +686,6 @@ define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata) define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i8_v16f32: ; CHECK: # %bb.0: @@ -756,7 +697,6 @@ define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata) define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i8_v16f32: ; CHECK: # %bb.0: @@ -768,7 +708,6 @@ define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ret <16 x float> %evec } -declare 
<32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata) define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i8_v32f16: ; CHECK: # %bb.0: @@ -782,7 +721,6 @@ define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata) define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v32i8_v32f16: ; CHECK: # %bb.0: @@ -796,7 +734,6 @@ define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata) define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f16: ; CHECK: # %bb.0: @@ -807,7 +744,6 @@ define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata) define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f16: ; CHECK: # %bb.0: @@ -818,7 +754,6 @@ define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata) define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f32: ; CHECK: # %bb.0: @@ -830,7 +765,6 @@ define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata) define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f32: ; CHECK: # %bb.0: @@ -842,7 +776,6 @@ define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ret <1 x float> %evec } -declare <1 x 
double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata) define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f64: ; CHECK: # %bb.0: @@ -854,7 +787,6 @@ define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata) define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f64: ; CHECK: # %bb.0: @@ -866,7 +798,6 @@ define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata) define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f16: ; CHECK: # %bb.0: @@ -877,7 +808,6 @@ define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata) define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f16: ; CHECK: # %bb.0: @@ -888,7 +818,6 @@ define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata) define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f32: ; CHECK: # %bb.0: @@ -900,7 +829,6 @@ define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata) define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f32: ; CHECK: # %bb.0: @@ -912,7 +840,6 @@ define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> 
@llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f64: ; CHECK: # %bb.0: @@ -924,7 +851,6 @@ define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f64: ; CHECK: # %bb.0: @@ -936,7 +862,6 @@ define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata) define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f16: ; CHECK: # %bb.0: @@ -947,7 +872,6 @@ define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata) define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f16: ; CHECK: # %bb.0: @@ -958,7 +882,6 @@ define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata) define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f32: ; CHECK: # %bb.0: @@ -970,7 +893,6 @@ define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata) define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f32: ; CHECK: # %bb.0: @@ -982,7 +904,6 @@ define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> 
@llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata) define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f64: ; CHECK: # %bb.0: @@ -994,7 +915,6 @@ define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata) define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f64: ; CHECK: # %bb.0: @@ -1006,7 +926,6 @@ define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata) define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f16: ; CHECK: # %bb.0: @@ -1017,7 +936,6 @@ define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata) define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f16: ; CHECK: # %bb.0: @@ -1028,7 +946,6 @@ define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata) define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f32: ; CHECK: # %bb.0: @@ -1040,7 +957,6 @@ define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata) define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f32: ; CHECK: # %bb.0: @@ -1052,7 +968,6 @@ define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> 
@llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata) define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f64: ; CHECK: # %bb.0: @@ -1064,7 +979,6 @@ define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata) define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f64: ; CHECK: # %bb.0: @@ -1076,7 +990,6 @@ define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata) define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i16_v16f16: ; CHECK: # %bb.0: @@ -1087,7 +1000,6 @@ define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata) define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i16_v16f16: ; CHECK: # %bb.0: @@ -1098,7 +1010,6 @@ define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata) define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i16_v16f32: ; CHECK: # %bb.0: @@ -1110,7 +1021,6 @@ define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata) define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i16_v16f32: ; CHECK: # %bb.0: @@ -1122,7 +1032,6 @@ define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> 
%va) strictfp { ret <16 x float> %evec } -declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata) define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i16_v32f16: ; CHECK: # %bb.0: @@ -1134,7 +1043,6 @@ define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata) define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v32i16_v32f16: ; CHECK: # %bb.0: @@ -1146,7 +1054,6 @@ define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata) define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f16: ; CHECK: # %bb.0: @@ -1158,7 +1065,6 @@ define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata) define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f16: ; CHECK: # %bb.0: @@ -1170,7 +1076,6 @@ define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata) define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f32: ; CHECK: # %bb.0: @@ -1181,7 +1086,6 @@ define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata) define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f32: ; CHECK: # %bb.0: @@ -1192,7 +1096,6 @@ define <1 x float> 
@vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata) define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f64: ; CHECK: # %bb.0: @@ -1204,7 +1107,6 @@ define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata) define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f64: ; CHECK: # %bb.0: @@ -1216,7 +1118,6 @@ define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata) define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f16: ; CHECK: # %bb.0: @@ -1228,7 +1129,6 @@ define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata) define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f16: ; CHECK: # %bb.0: @@ -1240,7 +1140,6 @@ define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata) define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f32: ; CHECK: # %bb.0: @@ -1251,7 +1150,6 @@ define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata) define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f32: ; CHECK: # %bb.0: @@ -1262,7 +1160,6 @@ define <2 x 
float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata) define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f64: ; CHECK: # %bb.0: @@ -1274,7 +1171,6 @@ define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata) define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f64: ; CHECK: # %bb.0: @@ -1286,7 +1182,6 @@ define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata) define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f16: ; CHECK: # %bb.0: @@ -1298,7 +1193,6 @@ define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata) define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f16: ; CHECK: # %bb.0: @@ -1310,7 +1204,6 @@ define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata) define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f32: ; CHECK: # %bb.0: @@ -1321,7 +1214,6 @@ define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata) define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f32: ; CHECK: # %bb.0: @@ -1332,7 +1224,6 @@ define 
<4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata) define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f64: ; CHECK: # %bb.0: @@ -1344,7 +1235,6 @@ define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata) define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f64: ; CHECK: # %bb.0: @@ -1356,7 +1246,6 @@ define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata) define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f16: ; CHECK: # %bb.0: @@ -1368,7 +1257,6 @@ define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata) define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f16: ; CHECK: # %bb.0: @@ -1380,7 +1268,6 @@ define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata) define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f32: ; CHECK: # %bb.0: @@ -1391,7 +1278,6 @@ define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata) define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f32: ; CHECK: # %bb.0: @@ -1402,7 +1288,6 @@ 
define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata) define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f64: ; CHECK: # %bb.0: @@ -1414,7 +1299,6 @@ define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata) define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f64: ; CHECK: # %bb.0: @@ -1426,7 +1310,6 @@ define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata) define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i32_v16f16: ; CHECK: # %bb.0: @@ -1438,7 +1321,6 @@ define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata) define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i32_v16f16: ; CHECK: # %bb.0: @@ -1450,7 +1332,6 @@ define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata) define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i32_v16f32: ; CHECK: # %bb.0: @@ -1461,7 +1342,6 @@ define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata) define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ; CHECK-LABEL: 
vuitofp_v16i32_v16f32: ; CHECK: # %bb.0: @@ -1472,7 +1352,6 @@ define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ret <16 x float> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata) define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f16: ; CHECK: # %bb.0: @@ -1485,7 +1364,6 @@ define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata) define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i64_v1f16: ; CHECK: # %bb.0: @@ -1498,7 +1376,6 @@ define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata) define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f32: ; CHECK: # %bb.0: @@ -1510,7 +1387,6 @@ define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata) define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i64_v1f32: ; CHECK: # %bb.0: @@ -1522,7 +1398,6 @@ define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata) define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f64: ; CHECK: # %bb.0: @@ -1533,7 +1408,6 @@ define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata) define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ; 
CHECK-LABEL: vuitofp_v1i64_v1f64: ; CHECK: # %bb.0: @@ -1544,8 +1418,6 @@ define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ret <1 x double> %evec } - -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata) define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f16: ; CHECK: # %bb.0: @@ -1558,7 +1430,6 @@ define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata) define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f16: ; CHECK: # %bb.0: @@ -1571,7 +1442,6 @@ define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata) define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f32: ; CHECK: # %bb.0: @@ -1583,7 +1453,6 @@ define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata) define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f32: ; CHECK: # %bb.0: @@ -1595,7 +1464,6 @@ define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata) define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f64: ; CHECK: # %bb.0: @@ -1606,7 +1474,6 @@ define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata) define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) 
strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f64: ; CHECK: # %bb.0: @@ -1617,7 +1484,6 @@ define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata) define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f16: ; CHECK: # %bb.0: @@ -1630,7 +1496,6 @@ define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata) define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f16: ; CHECK: # %bb.0: @@ -1643,7 +1508,6 @@ define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata) define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f32: ; CHECK: # %bb.0: @@ -1655,7 +1519,6 @@ define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata) define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f32: ; CHECK: # %bb.0: @@ -1667,7 +1530,6 @@ define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata) define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f64: ; CHECK: # %bb.0: @@ -1678,7 +1540,6 @@ define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata) define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> 
%va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f64: ; CHECK: # %bb.0: @@ -1689,7 +1550,6 @@ define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata) define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f16: ; CHECK: # %bb.0: @@ -1702,7 +1562,6 @@ define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata) define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f16: ; CHECK: # %bb.0: @@ -1715,7 +1574,6 @@ define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata) define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f32: ; CHECK: # %bb.0: @@ -1727,7 +1585,6 @@ define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata) define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f32: ; CHECK: # %bb.0: @@ -1739,7 +1596,6 @@ define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata) define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f64: ; CHECK: # %bb.0: @@ -1750,7 +1606,6 @@ define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata) define <8 x double> @vuitofp_v8i64_v8f64(<8 x 
i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll index f5a31d7eaadbe..3595eec2000e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) -declare <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vmacc_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define <2 x i8> @vmacc_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c, <2 x i1> % ret <2 x i8> %u } -declare <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) -declare <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vmacc_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define <4 x i8> @vmacc_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c, <4 x i1> % ret <4 x i8> %u } -declare <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) -declare <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vmacc_vv_nxv8i8(<8 x i8> 
%a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define <8 x i8> @vmacc_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c, <8 x i1> % ret <8 x i8> %u } -declare <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) -declare <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vmacc_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define <16 x i8> @vmacc_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c, <16 x ret <16 x i8> %u } -declare <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) -declare <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) - define <32 x i8> @vmacc_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define <32 x i8> @vmacc_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c, <32 x ret <32 x i8> %u } -declare <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) -declare <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) - define <64 x i8> @vmacc_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv64i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define <64 x i8> @vmacc_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x 
i8> %c, <64 x ret <64 x i8> %u } -declare <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) -declare <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vmacc_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,11 +592,6 @@ define <2 x i16> @vmacc_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c, <2 x ret <2 x i16> %u } -declare <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) -declare <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vmacc_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i16: ; CHECK: # %bb.0: @@ -716,11 +676,6 @@ define <4 x i16> @vmacc_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c, <4 x ret <4 x i16> %u } -declare <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vmacc_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i16: ; CHECK: # %bb.0: @@ -805,11 +760,6 @@ define <8 x i16> @vmacc_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c, <8 x ret <8 x i16> %u } -declare <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x 
i16> @llvm.vp.merge.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) -declare <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vmacc_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i16: ; CHECK: # %bb.0: @@ -894,11 +844,6 @@ define <16 x i16> @vmacc_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c, < ret <16 x i16> %u } -declare <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) -declare <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) - define <32 x i16> @vmacc_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i16: ; CHECK: # %bb.0: @@ -983,11 +928,6 @@ define <32 x i16> @vmacc_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c, < ret <32 x i16> %u } -declare <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) -declare <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vmacc_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1072,11 +1012,6 @@ define <2 x i32> @vmacc_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c, <2 x ret <2 x i32> %u } -declare <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) -declare <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> 
@vmacc_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1161,11 +1096,6 @@ define <4 x i32> @vmacc_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c, <4 x ret <4 x i32> %u } -declare <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vmacc_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1250,11 +1180,6 @@ define <8 x i32> @vmacc_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c, <8 x ret <8 x i32> %u } -declare <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) -declare <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vmacc_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1339,11 +1264,6 @@ define <16 x i32> @vmacc_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c, < ret <16 x i32> %u } -declare <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) -declare <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vmacc_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1476,11 +1396,6 @@ define <2 x 
i64> @vmacc_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c, <2 x ret <2 x i64> %u } -declare <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) -declare <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @vmacc_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1613,11 +1528,6 @@ define <4 x i64> @vmacc_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c, <4 x ret <4 x i64> %u } -declare <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) -declare <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vmacc_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll index ec5845752c29c..f5978de080082 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.smax.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmax_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vmax_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.smax.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> 
@vmax_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vmax_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.smax.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmax_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i8: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <4 x i8> @vmax_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.smax.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmax_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v5i8: ; CHECK: # %bb.0: @@ -170,8 +162,6 @@ define <5 x i8> @vmax_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.smax.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vmax_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i8: ; CHECK: # %bb.0: @@ -216,8 +206,6 @@ define <8 x i8> @vmax_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.smax.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmax_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i8: ; CHECK: # %bb.0: @@ -262,8 +250,6 @@ define <16 x i8> @vmax_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v258i8: ; CHECK: # %bb.0: @@ -351,8 +337,6 @@ define <256 x i8> @vmax_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.smax.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 
x i16> @vmax_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i16: ; CHECK: # %bb.0: @@ -397,8 +381,6 @@ define <2 x i16> @vmax_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.smax.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmax_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i16: ; CHECK: # %bb.0: @@ -443,8 +425,6 @@ define <4 x i16> @vmax_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.smax.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmax_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i16: ; CHECK: # %bb.0: @@ -489,8 +469,6 @@ define <8 x i16> @vmax_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.smax.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmax_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i16: ; CHECK: # %bb.0: @@ -535,8 +513,6 @@ define <16 x i16> @vmax_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.smax.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmax_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i32: ; CHECK: # %bb.0: @@ -581,8 +557,6 @@ define <2 x i32> @vmax_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmax_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i32: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <4 x i32> @vmax_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> 
@llvm.vp.smax.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmax_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i32: ; CHECK: # %bb.0: @@ -673,8 +645,6 @@ define <8 x i32> @vmax_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.smax.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmax_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i32: ; CHECK: # %bb.0: @@ -719,8 +689,6 @@ define <16 x i32> @vmax_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.smax.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmax_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vmax_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.smax.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmax_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i64: ; CHECK: # %bb.0: @@ -871,8 +837,6 @@ define <4 x i64> @vmax_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.smax.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmax_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i64: ; CHECK: # %bb.0: @@ -947,8 +911,6 @@ define <8 x i64> @vmax_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.smax.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmax_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i64: ; CHECK: # %bb.0: @@ -1025,8 +987,6 @@ define <16 x i64> @vmax_vx_v16i64_unmasked(<16 x i64> 
%va, i64 %b, i32 zeroext % ; Test that split-legalization works as expected. -declare <32 x i64> @llvm.vp.smax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll index 2ffd3318d8759..7450a70df66ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.umax.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmaxu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vmaxu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.umax.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmaxu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vmaxu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.umax.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmaxu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i8: ; CHECK: # %bb.0: @@ -123,8 +117,6 @@ define <4 x i8> @vmaxu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.umax.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmaxu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v5i8: ; CHECK: # %bb.0: @@ -169,8 +161,6 @@ define <5 x i8> @vmaxu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, 
i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.umax.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vmaxu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i8: ; CHECK: # %bb.0: @@ -215,8 +205,6 @@ define <8 x i8> @vmaxu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.umax.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmaxu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i8: ; CHECK: # %bb.0: @@ -261,8 +249,6 @@ define <16 x i8> @vmaxu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v258i8: ; CHECK: # %bb.0: @@ -350,8 +336,6 @@ define <256 x i8> @vmaxu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.umax.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmaxu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i16: ; CHECK: # %bb.0: @@ -396,8 +380,6 @@ define <2 x i16> @vmaxu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.umax.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmaxu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i16: ; CHECK: # %bb.0: @@ -442,8 +424,6 @@ define <4 x i16> @vmaxu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.umax.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmaxu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i16: ; CHECK: # %bb.0: @@ -488,8 +468,6 @@ define <8 x 
i16> @vmaxu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.umax.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmaxu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i16: ; CHECK: # %bb.0: @@ -534,8 +512,6 @@ define <16 x i16> @vmaxu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.umax.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmaxu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i32: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <2 x i32> @vmaxu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmaxu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i32: ; CHECK: # %bb.0: @@ -626,8 +600,6 @@ define <4 x i32> @vmaxu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.umax.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmaxu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i32: ; CHECK: # %bb.0: @@ -672,8 +644,6 @@ define <8 x i32> @vmaxu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.umax.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmaxu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i32: ; CHECK: # %bb.0: @@ -718,8 +688,6 @@ define <16 x i32> @vmaxu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.umax.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmaxu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmaxu_vv_v2i64: ; CHECK: # %bb.0: @@ -794,8 +762,6 @@ define <2 x i64> @vmaxu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.umax.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmaxu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i64: ; CHECK: # %bb.0: @@ -870,8 +836,6 @@ define <4 x i64> @vmaxu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.umax.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmaxu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i64: ; CHECK: # %bb.0: @@ -946,8 +910,6 @@ define <8 x i64> @vmaxu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.umax.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmaxu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i64: ; CHECK: # %bb.0: @@ -1024,8 +986,6 @@ define <16 x i64> @vmaxu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.umax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll index 53649c77098f2..31d19304c2909 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.smin.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmin_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vmin_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.smin.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmin_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vmin_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.smin.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmin_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i8: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <4 x i8> @vmin_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.smin.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmin_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v5i8: ; CHECK: # %bb.0: @@ -170,8 +162,6 @@ define <5 x i8> @vmin_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.smin.v8i8(<8 x i8>, <8 x 
i8>, <8 x i1>, i32) - define <8 x i8> @vmin_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i8: ; CHECK: # %bb.0: @@ -216,8 +206,6 @@ define <8 x i8> @vmin_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.smin.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmin_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i8: ; CHECK: # %bb.0: @@ -262,8 +250,6 @@ define <16 x i8> @vmin_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v258i8: ; CHECK: # %bb.0: @@ -351,8 +337,6 @@ define <256 x i8> @vmin_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.smin.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmin_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i16: ; CHECK: # %bb.0: @@ -397,8 +381,6 @@ define <2 x i16> @vmin_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.smin.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmin_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i16: ; CHECK: # %bb.0: @@ -443,8 +425,6 @@ define <4 x i16> @vmin_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.smin.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmin_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i16: ; CHECK: # %bb.0: @@ -489,8 +469,6 @@ define <8 x i16> @vmin_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x 
i16> @llvm.vp.smin.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmin_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i16: ; CHECK: # %bb.0: @@ -535,8 +513,6 @@ define <16 x i16> @vmin_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.smin.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmin_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i32: ; CHECK: # %bb.0: @@ -581,8 +557,6 @@ define <2 x i32> @vmin_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmin_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i32: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <4 x i32> @vmin_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.smin.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmin_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i32: ; CHECK: # %bb.0: @@ -673,8 +645,6 @@ define <8 x i32> @vmin_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.smin.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmin_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i32: ; CHECK: # %bb.0: @@ -719,8 +689,6 @@ define <16 x i32> @vmin_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.smin.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmin_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vmin_vx_v2i64_unmasked(<2 x i64> 
%va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.smin.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmin_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i64: ; CHECK: # %bb.0: @@ -871,8 +837,6 @@ define <4 x i64> @vmin_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.smin.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmin_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i64: ; CHECK: # %bb.0: @@ -947,8 +911,6 @@ define <8 x i64> @vmin_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.smin.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmin_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i64: ; CHECK: # %bb.0: @@ -1025,8 +987,6 @@ define <16 x i64> @vmin_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.smin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll index 76b5be39f2d93..dda69ec8a7d2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.umin.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vminu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vminu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.umin.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vminu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vminu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.umin.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vminu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i8: ; CHECK: # %bb.0: @@ -123,8 +117,6 @@ define <4 x i8> @vminu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.umin.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vminu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v5i8: ; CHECK: # %bb.0: @@ -169,8 +161,6 @@ define <5 x i8> @vminu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.umin.v8i8(<8 x 
i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vminu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i8: ; CHECK: # %bb.0: @@ -215,8 +205,6 @@ define <8 x i8> @vminu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.umin.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vminu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i8: ; CHECK: # %bb.0: @@ -261,8 +249,6 @@ define <16 x i8> @vminu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v258i8: ; CHECK: # %bb.0: @@ -350,8 +336,6 @@ define <256 x i8> @vminu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.umin.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vminu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i16: ; CHECK: # %bb.0: @@ -396,8 +380,6 @@ define <2 x i16> @vminu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.umin.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vminu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i16: ; CHECK: # %bb.0: @@ -442,8 +424,6 @@ define <4 x i16> @vminu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.umin.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vminu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i16: ; CHECK: # %bb.0: @@ -488,8 +468,6 @@ define <8 x i16> @vminu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x 
i16> %v } -declare <16 x i16> @llvm.vp.umin.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vminu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i16: ; CHECK: # %bb.0: @@ -534,8 +512,6 @@ define <16 x i16> @vminu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.umin.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vminu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i32: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <2 x i32> @vminu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.umin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vminu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i32: ; CHECK: # %bb.0: @@ -626,8 +600,6 @@ define <4 x i32> @vminu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.umin.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vminu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i32: ; CHECK: # %bb.0: @@ -672,8 +644,6 @@ define <8 x i32> @vminu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.umin.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vminu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i32: ; CHECK: # %bb.0: @@ -718,8 +688,6 @@ define <16 x i32> @vminu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.umin.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vminu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i64: ; CHECK: # %bb.0: @@ -794,8 +762,6 @@ define <2 x 
i64> @vminu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.umin.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vminu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i64: ; CHECK: # %bb.0: @@ -870,8 +836,6 @@ define <4 x i64> @vminu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.umin.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vminu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i64: ; CHECK: # %bb.0: @@ -946,8 +910,6 @@ define <8 x i64> @vminu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.umin.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vminu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i64: ; CHECK: # %bb.0: @@ -1024,8 +986,6 @@ define <16 x i64> @vminu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.umin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll index 3824ed76625df..40119a20ad07c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.mul.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.mul.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.mul.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.mul.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> 
@llvm.vp.mul.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.mul.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vmul_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll index 26000033bd1db..9920a34520664 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.mul.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.mul.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmul_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i8: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define <2 x i8> @vmul_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.mul.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmul_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i8: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define <4 x i8> @vmul_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.mul.v8i8(<8 x i8>, <8 x i8>, <8 x 
i1>, i32) - define <8 x i8> @vmul_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i8: ; CHECK: # %bb.0: @@ -154,8 +146,6 @@ define <8 x i8> @vmul_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.mul.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmul_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i8: ; CHECK: # %bb.0: @@ -200,8 +190,6 @@ define <16 x i8> @vmul_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.mul.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmul_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i16: ; CHECK: # %bb.0: @@ -246,8 +234,6 @@ define <2 x i16> @vmul_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.mul.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmul_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i16: ; CHECK: # %bb.0: @@ -292,8 +278,6 @@ define <4 x i16> @vmul_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.mul.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmul_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i16: ; CHECK: # %bb.0: @@ -350,8 +334,6 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <12 x i16> @llvm.vp.mul.v12i16(<12 x i16>, <12 x i16>, <12 x i1>, i32) - define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v12i16: ; CHECK: # %bb.0: @@ -396,8 +378,6 @@ define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext % ret <12 x i16> %v } -declare <16 x i16> 
@llvm.vp.mul.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i16: ; CHECK: # %bb.0: @@ -442,8 +422,6 @@ define <16 x i16> @vmul_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.mul.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmul_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i32: ; CHECK: # %bb.0: @@ -488,8 +466,6 @@ define <2 x i32> @vmul_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmul_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i32: ; CHECK: # %bb.0: @@ -534,8 +510,6 @@ define <4 x i32> @vmul_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmul_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i32: ; CHECK: # %bb.0: @@ -580,8 +554,6 @@ define <8 x i32> @vmul_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.mul.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmul_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i32: ; CHECK: # %bb.0: @@ -626,8 +598,6 @@ define <16 x i32> @vmul_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.mul.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmul_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i64: ; CHECK: # %bb.0: @@ -702,8 +672,6 @@ define <2 x i64> @vmul_vx_v2i64_unmasked(<2 x i64> %va, i64 
%b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.mul.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmul_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i64: ; CHECK: # %bb.0: @@ -778,8 +746,6 @@ define <4 x i64> @vmul_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.mul.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i64: ; CHECK: # %bb.0: @@ -854,8 +820,6 @@ define <8 x i64> @vmul_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.mul.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmul_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i64: ; CHECK: # %bb.0: @@ -930,7 +894,6 @@ define <16 x i64> @vmul_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - define <8 x i64> @vmul_vv_undef_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vmul_vv_undef_v8i64: ; RV32: # %bb.0: @@ -1105,8 +1068,6 @@ define <8 x i64> @vmul_vx_negpow2_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl ret <8 x i64> %v } -declare <8 x i64> @llvm.vp.shl.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vshl_vx_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vshl_vx_v8i64: ; CHECK: # %bb.0: @@ -1172,8 +1133,6 @@ define <8 x i64> @vmul_vshl_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %vb, i32 ret <8 x i64> %v } -declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vadd_vx_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vadd_vx_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll index b8798fe6c63dc..cc492f3c6dcb6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) -declare <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vnmsac_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define <2 x i8> @vnmsac_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c, <2 x i1> ret <2 x i8> %u } -declare <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) -declare <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vnmsac_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define <4 x i8> @vnmsac_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c, <4 x i1> ret <4 x i8> %u } -declare <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) -declare <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vnmsac_vv_nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ 
define <8 x i8> @vnmsac_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c, <8 x i1> ret <8 x i8> %u } -declare <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) -declare <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vnmsac_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define <16 x i8> @vnmsac_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c, <16 x ret <16 x i8> %u } -declare <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) -declare <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) - define <32 x i8> @vnmsac_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define <32 x i8> @vnmsac_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c, <32 x ret <32 x i8> %u } -declare <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) -declare <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) - define <64 x i8> @vnmsac_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv64i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define <64 x i8> @vnmsac_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x i8> %c, <64 x ret <64 x i8> %u } -declare <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x 
i16> @llvm.vp.sub.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) -declare <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vnmsac_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,11 +592,6 @@ define <2 x i16> @vnmsac_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c, <2 x ret <2 x i16> %u } -declare <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) -declare <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vnmsac_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i16: ; CHECK: # %bb.0: @@ -716,11 +676,6 @@ define <4 x i16> @vnmsac_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c, <4 x ret <4 x i16> %u } -declare <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vnmsac_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i16: ; CHECK: # %bb.0: @@ -805,11 +760,6 @@ define <8 x i16> @vnmsac_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c, <8 x ret <8 x i16> %u } -declare <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) -declare <16 x i16> @llvm.vp.select.nxv16i16(<16 
x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vnmsac_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i16: ; CHECK: # %bb.0: @@ -894,11 +844,6 @@ define <16 x i16> @vnmsac_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c, ret <16 x i16> %u } -declare <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) -declare <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) - define <32 x i16> @vnmsac_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i16: ; CHECK: # %bb.0: @@ -983,11 +928,6 @@ define <32 x i16> @vnmsac_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c, ret <32 x i16> %u } -declare <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) -declare <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vnmsac_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1072,11 +1012,6 @@ define <2 x i32> @vnmsac_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c, <2 x ret <2 x i32> %u } -declare <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) -declare <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @vnmsac_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vnmsac_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1161,11 +1096,6 @@ define <4 x i32> @vnmsac_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c, <4 x ret <4 x i32> %u } -declare <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vnmsac_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1250,11 +1180,6 @@ define <8 x i32> @vnmsac_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c, <8 x ret <8 x i32> %u } -declare <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) -declare <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vnmsac_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1339,11 +1264,6 @@ define <16 x i32> @vnmsac_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c, ret <16 x i32> %u } -declare <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) -declare <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vnmsac_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1476,11 +1396,6 @@ define <2 x i64> @vnmsac_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c, <2 x ret <2 x i64> %u } -declare <4 x 
i64> @llvm.vp.mul.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) -declare <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @vnmsac_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1613,11 +1528,6 @@ define <4 x i64> @vnmsac_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c, <4 x ret <4 x i64> %u } -declare <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) -declare <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vnmsac_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll index 2ebd008f8dbe7..7127d2318d39b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.or.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.or.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i8: ; CHECK: # 
%bb.0: @@ -82,8 +78,6 @@ define <2 x i8> @vor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.or.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i8: ; CHECK: # %bb.0: @@ -160,8 +154,6 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.or.v5i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vor_vv_v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v5i8: ; CHECK: # %bb.0: @@ -226,8 +218,6 @@ define <7 x i8> @vor_vi_v5i8_unmasked(<7 x i8> %va, i32 zeroext %evl) { ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.or.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i8: ; CHECK: # %bb.0: @@ -292,8 +282,6 @@ define <8 x i8> @vor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.or.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i8: ; CHECK: # %bb.0: @@ -358,8 +346,6 @@ define <16 x i8> @vor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.or.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i16: ; CHECK: # %bb.0: @@ -424,8 +410,6 @@ define <2 x i16> @vor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.or.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i16: ; CHECK: # %bb.0: @@ -490,8 +474,6 @@ define <4 x i16> 
@vor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.or.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i16: ; CHECK: # %bb.0: @@ -556,8 +538,6 @@ define <8 x i16> @vor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.or.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i16: ; CHECK: # %bb.0: @@ -622,8 +602,6 @@ define <16 x i16> @vor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.or.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i32: ; CHECK: # %bb.0: @@ -688,8 +666,6 @@ define <2 x i32> @vor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i32: ; CHECK: # %bb.0: @@ -754,8 +730,6 @@ define <4 x i32> @vor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.or.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i32: ; CHECK: # %bb.0: @@ -820,8 +794,6 @@ define <8 x i32> @vor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.or.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i32: ; CHECK: # %bb.0: @@ -886,8 +858,6 @@ define <16 
x i32> @vor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.or.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i64: ; CHECK: # %bb.0: @@ -982,8 +952,6 @@ define <2 x i64> @vor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.or.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i64: ; CHECK: # %bb.0: @@ -1078,8 +1046,6 @@ define <4 x i64> @vor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.or.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i64: ; CHECK: # %bb.0: @@ -1174,8 +1140,6 @@ define <8 x i64> @vor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.or.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll index 27f16f0285e12..60fc1771de09b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll @@ -120,7 +120,3 @@ define <16 x i8> @test_vp_reverse_v16i8(<16 x i8> %src, i32 zeroext %evl) { ret <16 x i8> %dst } -declare <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64>,<2 x i1>,i32) -declare <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32>,<4 x i1>,i32) -declare <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16>,<8 x i1>,i32) -declare <16 x i8> 
@llvm.experimental.vp.reverse.v16i8(<16 x i8>,<16 x i1>,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll index 352666de57881..3f5751aaa2cad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare <2 x i8> @llvm.vp.gather.v2i8.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i8> @vpgather_v2i8(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i8: ; RV32: # %bb.0: @@ -154,8 +152,6 @@ define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 ret <2 x i64> %ev } -declare <3 x i8> @llvm.vp.gather.v3i8.v3p0(<3 x ptr>, <3 x i1>, i32) - define <3 x i8> @vpgather_v3i8(<3 x ptr> %ptrs, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v3i8: ; RV32: # %bb.0: @@ -192,8 +188,6 @@ define <3 x i8> @vpgather_truemask_v3i8(<3 x ptr> %ptrs, i32 zeroext %evl) { ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.gather.v4i8.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i8> @vpgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i8: ; RV32: # %bb.0: @@ -230,8 +224,6 @@ define <4 x i8> @vpgather_truemask_v4i8(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.gather.v8i8.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i8> @vpgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i8: ; RV32: # %bb.0: @@ -271,8 +263,6 @@ define <8 x i8> @vpgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, i ret <8 x i8> %v } -declare <32 x i8> @llvm.vp.gather.v32i8.v32p0(<32 x ptr>, <32 x i1>, i32) - define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i8: ; RV32: # %bb.0: @@ 
-317,8 +307,6 @@ define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> % ret <32 x i8> %v } -declare <2 x i16> @llvm.vp.gather.v2i16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i16> @vpgather_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i16: ; RV32: # %bb.0: @@ -421,8 +409,6 @@ define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i3 ret <2 x i64> %ev } -declare <4 x i16> @llvm.vp.gather.v4i16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i16> @vpgather_v4i16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i16: ; RV32: # %bb.0: @@ -459,8 +445,6 @@ define <4 x i16> @vpgather_truemask_v4i16(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.gather.v8i16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i16> @vpgather_v8i16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i16: ; RV32: # %bb.0: @@ -570,8 +554,6 @@ define <8 x i16> @vpgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ret <8 x i16> %v } -declare <2 x i32> @llvm.vp.gather.v2i32.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i32> @vpgather_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i32: ; RV32: # %bb.0: @@ -631,8 +613,6 @@ define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i3 ret <2 x i64> %ev } -declare <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @vpgather_v4i32(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i32: ; RV32: # %bb.0: @@ -667,8 +647,6 @@ define <4 x i32> @vpgather_truemask_v4i32(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.gather.v8i32.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i32> @vpgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i32: ; RV32: # %bb.0: @@ -850,8 +828,6 @@ define <8 x i32> 
@vpgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m ret <8 x i32> %v } -declare <2 x i64> @llvm.vp.gather.v2i64.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i64> @vpgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i64: ; RV32: # %bb.0: @@ -869,8 +845,6 @@ define <2 x i64> @vpgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.gather.v4i64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i64> @vpgather_v4i64(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i64: ; RV32: # %bb.0: @@ -905,8 +879,6 @@ define <4 x i64> @vpgather_truemask_v4i64(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.gather.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i64> @vpgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i64: ; RV32: # %bb.0: @@ -1156,8 +1128,6 @@ define <8 x i64> @vpgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m ret <8 x i64> %v } -declare <2 x bfloat> @llvm.vp.gather.v2bf16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x bfloat> @vpgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2bf16: ; RV32: # %bb.0: @@ -1176,8 +1146,6 @@ define <2 x bfloat> @vpgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext % ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.gather.v4bf16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x bfloat> @vpgather_v4bf16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4bf16: ; RV32: # %bb.0: @@ -1214,8 +1182,6 @@ define <4 x bfloat> @vpgather_truemask_v4bf16(<4 x ptr> %ptrs, i32 zeroext %evl) ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.gather.v8bf16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x bfloat> @vpgather_v8bf16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8bf16: ; RV32: # %bb.0: @@ -1325,8 +1291,6 @@ define <8 x bfloat> 
@vpgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1 ret <8 x bfloat> %v } -declare <2 x half> @llvm.vp.gather.v2f16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x half> @vpgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f16: ; RV32: # %bb.0: @@ -1345,8 +1309,6 @@ define <2 x half> @vpgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl ret <2 x half> %v } -declare <4 x half> @llvm.vp.gather.v4f16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x half> @vpgather_v4f16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f16: ; RV32: # %bb.0: @@ -1383,8 +1345,6 @@ define <4 x half> @vpgather_truemask_v4f16(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.gather.v8f16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x half> @vpgather_v8f16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f16: ; RV32: # %bb.0: @@ -1494,8 +1454,6 @@ define <8 x half> @vpgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> % ret <8 x half> %v } -declare <2 x float> @llvm.vp.gather.v2f32.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x float> @vpgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f32: ; RV32: # %bb.0: @@ -1513,8 +1471,6 @@ define <2 x float> @vpgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.gather.v4f32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x float> @vpgather_v4f32(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f32: ; RV32: # %bb.0: @@ -1549,8 +1505,6 @@ define <4 x float> @vpgather_truemask_v4f32(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.gather.v8f32.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x float> @vpgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f32: ; RV32: # %bb.0: @@ -1732,8 +1686,6 @@ define <8 x float> 
@vpgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> ret <8 x float> %v } -declare <2 x double> @llvm.vp.gather.v2f64.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x double> @vpgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f64: ; RV32: # %bb.0: @@ -1751,8 +1703,6 @@ define <2 x double> @vpgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %e ret <2 x double> %v } -declare <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x double> @vpgather_v4f64(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f64: ; RV32: # %bb.0: @@ -1787,8 +1737,6 @@ define <4 x double> @vpgather_truemask_v4f64(<4 x ptr> %ptrs, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.gather.v8f64.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x double> @vpgather_v8f64(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f64: ; RV32: # %bb.0: @@ -2038,8 +1986,6 @@ define <8 x double> @vpgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1> ret <8 x double> %v } -declare <32 x double> @llvm.vp.gather.v32f64.v32p0(<32 x ptr>, <32 x i1>, i32) - define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll index 8e2e8f3fb0dec..d058669c103f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.vp.load.v2i8.p0(ptr, <2 x i1>, i32) - define <2 x i8> @vpload_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i8: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i8> @vpload_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i8> %load } -declare <3 
x i8> @llvm.vp.load.v3i8.p0(ptr, <3 x i1>, i32) - define <3 x i8> @vpload_v3i8(ptr %ptr, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v3i8: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <3 x i8> @vpload_v3i8(ptr %ptr, <3 x i1> %m, i32 zeroext %evl) { ret <3 x i8> %load } -declare <4 x i8> @llvm.vp.load.v4i8.p0(ptr, <4 x i1>, i32) - define <4 x i8> @vpload_v4i8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i8: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define <4 x i8> @vpload_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x i8> %load } -declare <8 x i8> @llvm.vp.load.v8i8.p0(ptr, <8 x i1>, i32) - define <8 x i8> @vpload_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i8: ; CHECK: # %bb.0: @@ -62,8 +54,6 @@ define <8 x i8> @vpload_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i8> %load } -declare <2 x i16> @llvm.vp.load.v2i16.p0(ptr, <2 x i1>, i32) - define <2 x i16> @vpload_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i16: ; CHECK: # %bb.0: @@ -74,8 +64,6 @@ define <2 x i16> @vpload_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i16> %load } -declare <4 x i16> @llvm.vp.load.v4i16.p0(ptr, <4 x i1>, i32) - define <4 x i16> @vpload_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i16: ; CHECK: # %bb.0: @@ -86,8 +74,6 @@ define <4 x i16> @vpload_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i16> %load } -declare <8 x i16> @llvm.vp.load.v8i16.p0(ptr, <8 x i1>, i32) - define <8 x i16> @vpload_v8i16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i16: ; CHECK: # %bb.0: @@ -108,8 +94,6 @@ define <8 x i16> @vpload_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x i16> %load } -declare <2 x i32> @llvm.vp.load.v2i32.p0(ptr, <2 x i1>, i32) - define <2 x i32> @vpload_v2i32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i32: ; CHECK: # %bb.0: @@ -120,8 +104,6 @@ define <2 x i32> @vpload_v2i32(ptr 
%ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i32> %load } -declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32) - define <4 x i32> @vpload_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i32: ; CHECK: # %bb.0: @@ -132,8 +114,6 @@ define <4 x i32> @vpload_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i32> %load } -declare <6 x i32> @llvm.vp.load.v6i32.p0(ptr, <6 x i1>, i32) - define <6 x i32> @vpload_v6i32(ptr %ptr, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v6i32: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define <6 x i32> @vpload_v6i32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <6 x i32> %load } -declare <8 x i32> @llvm.vp.load.v8i32.p0(ptr, <8 x i1>, i32) - define <8 x i32> @vpload_v8i32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i32: ; CHECK: # %bb.0: @@ -176,8 +154,6 @@ define <8 x i32> @vpload_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x i32> %load } -declare <2 x i64> @llvm.vp.load.v2i64.p0(ptr, <2 x i1>, i32) - define <2 x i64> @vpload_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i64: ; CHECK: # %bb.0: @@ -188,8 +164,6 @@ define <2 x i64> @vpload_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i64> %load } -declare <4 x i64> @llvm.vp.load.v4i64.p0(ptr, <4 x i1>, i32) - define <4 x i64> @vpload_v4i64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i64: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define <4 x i64> @vpload_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x i64> %load } -declare <8 x i64> @llvm.vp.load.v8i64.p0(ptr, <8 x i1>, i32) - define <8 x i64> @vpload_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i64: ; CHECK: # %bb.0: @@ -222,8 +194,6 @@ define <8 x i64> @vpload_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i64> %load } -declare <2 x half> @llvm.vp.load.v2f16.p0(ptr, <2 x i1>, i32) - define <2 x half> @vpload_v2f16(ptr %ptr, <2 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpload_v2f16: ; CHECK: # %bb.0: @@ -244,8 +214,6 @@ define <2 x half> @vpload_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <2 x half> %load } -declare <4 x half> @llvm.vp.load.v4f16.p0(ptr, <4 x i1>, i32) - define <4 x half> @vpload_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f16: ; CHECK: # %bb.0: @@ -256,8 +224,6 @@ define <4 x half> @vpload_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x half> %load } -declare <8 x half> @llvm.vp.load.v8f16.p0(ptr, <8 x i1>, i32) - define <8 x half> @vpload_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f16: ; CHECK: # %bb.0: @@ -268,8 +234,6 @@ define <8 x half> @vpload_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x half> %load } -declare <2 x float> @llvm.vp.load.v2f32.p0(ptr, <2 x i1>, i32) - define <2 x float> @vpload_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f32: ; CHECK: # %bb.0: @@ -280,8 +244,6 @@ define <2 x float> @vpload_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x float> %load } -declare <4 x float> @llvm.vp.load.v4f32.p0(ptr, <4 x i1>, i32) - define <4 x float> @vpload_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f32: ; CHECK: # %bb.0: @@ -292,8 +254,6 @@ define <4 x float> @vpload_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x float> %load } -declare <8 x float> @llvm.vp.load.v8f32.p0(ptr, <8 x i1>, i32) - define <8 x float> @vpload_v8f32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f32: ; CHECK: # %bb.0: @@ -314,8 +274,6 @@ define <8 x float> @vpload_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x float> %load } -declare <2 x double> @llvm.vp.load.v2f64.p0(ptr, <2 x i1>, i32) - define <2 x double> @vpload_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f64: ; CHECK: # %bb.0: @@ -326,8 +284,6 @@ define <2 x double> @vpload_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { 
ret <2 x double> %load } -declare <4 x double> @llvm.vp.load.v4f64.p0(ptr, <4 x i1>, i32) - define <4 x double> @vpload_v4f64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f64: ; CHECK: # %bb.0: @@ -348,8 +304,6 @@ define <4 x double> @vpload_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x double> %load } -declare <8 x double> @llvm.vp.load.v8f64.p0(ptr, <8 x i1>, i32) - define <8 x double> @vpload_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f64: ; CHECK: # %bb.0: @@ -360,8 +314,6 @@ define <8 x double> @vpload_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x double> %load } -declare <32 x double> @llvm.vp.load.v32f64.p0(ptr, <32 x i1>, i32) - define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v32f64: ; CHECK: # %bb.0: @@ -387,8 +339,6 @@ define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ret <32 x double> %load } -declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32) - ; Widen to v64f64 then split into 4 x v16f64, of which 1 is empty. 
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll index 4186a6b304a22..844e0213989bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+m,+zvfh,+zfbfmin,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x bfloat> @llvm.vp.merge.v2bf16(<2 x i1>, <2 x bfloat>, <2 x bfloat>, i32) - define <2 x bfloat> @vpmerge_vv_v2bf16(<2 x bfloat> %va, <2 x bfloat> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2bf16: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define <2 x bfloat> @vpmerge_vf_v2bf16(bfloat %a, <2 x bfloat> %vb, <2 x i1> %m, ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.merge.v4bf16(<4 x i1>, <4 x bfloat>, <4 x bfloat>, i32) - define <4 x bfloat> @vpmerge_vv_v4bf16(<4 x bfloat> %va, <4 x bfloat> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4bf16: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define <4 x bfloat> @vpmerge_vf_v4bf16(bfloat %a, <4 x bfloat> %vb, <4 x i1> %m, ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.merge.v8bf16(<8 x i1>, <8 x bfloat>, <8 x bfloat>, i32) - define <8 x bfloat> @vpmerge_vv_v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8bf16: ; CHECK: # %bb.0: @@ -92,8 +86,6 @@ define <8 x bfloat> @vpmerge_vf_v8bf16(bfloat %a, <8 x bfloat> %vb, <8 x i1> %m, ret <8 x bfloat> %v } -declare <16 x bfloat> @llvm.vp.merge.v16bf16(<16 x i1>, <16 x bfloat>, <16 x bfloat>, i32) - define <16 x bfloat> @vpmerge_vv_v16bf16(<16 x bfloat> %va, <16 x bfloat> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll index 4299707c9a48c..7968c5190eb01 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <4 x i1> @llvm.vp.merge.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vpmerge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_vv_v4i1: ; RV32: # %bb.0: @@ -234,8 +232,6 @@ define <64 x i1> @vpmerge_vv_v64i1(<64 x i1> %va, <64 x i1> %vb, <64 x i1> %m, i ret <64 x i1> %v } -declare <2 x i8> @llvm.vp.merge.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vpmerge_vv_v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i8: ; CHECK: # %bb.0: @@ -269,8 +265,6 @@ define <2 x i8> @vpmerge_vi_v2i8(<2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.merge.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vpmerge_vv_v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i8: ; CHECK: # %bb.0: @@ -304,8 +298,6 @@ define <4 x i8> @vpmerge_vi_v4i8(<4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.merge.v6i8(<6 x i1>, <6 x i8>, <6 x i8>, i32) - define <6 x i8> @vpmerge_vv_v6i8(<6 x i8> %va, <6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v6i8: ; CHECK: # %bb.0: @@ -339,8 +331,6 @@ define <6 x i8> @vpmerge_vi_v6i8(<6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ret <6 x i8> %v } -declare <8 x i7> @llvm.vp.merge.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32) - define <8 x i7> @vpmerge_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i7: ; CHECK: # %bb.0: @@ -374,8 +364,6 @@ define <8 x 
i7> @vpmerge_vi_v8i7(<8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i7> %v } -declare <8 x i8> @llvm.vp.merge.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vpmerge_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i8: ; CHECK: # %bb.0: @@ -409,8 +397,6 @@ define <8 x i8> @vpmerge_vi_v8i8(<8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.merge.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vpmerge_vv_v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i8: ; CHECK: # %bb.0: @@ -444,8 +430,6 @@ define <16 x i8> @vpmerge_vi_v16i8(<16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.merge.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vpmerge_vv_v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i16: ; CHECK: # %bb.0: @@ -479,8 +463,6 @@ define <2 x i16> @vpmerge_vi_v2i16(<2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.merge.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vpmerge_vv_v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i16: ; CHECK: # %bb.0: @@ -514,8 +496,6 @@ define <4 x i16> @vpmerge_vi_v4i16(<4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vpmerge_vv_v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i16: ; CHECK: # %bb.0: @@ -549,8 +529,6 @@ define <8 x i16> @vpmerge_vi_v8i16(<8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.merge.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vpmerge_vv_v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpmerge_vv_v16i16: ; CHECK: # %bb.0: @@ -584,8 +562,6 @@ define <16 x i16> @vpmerge_vi_v16i16(<16 x i16> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.merge.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vpmerge_vv_v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i32: ; CHECK: # %bb.0: @@ -619,8 +595,6 @@ define <2 x i32> @vpmerge_vi_v2i32(<2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.merge.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @vpmerge_vv_v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i32: ; CHECK: # %bb.0: @@ -654,8 +628,6 @@ define <4 x i32> @vpmerge_vi_v4i32(<4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vpmerge_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i32: ; CHECK: # %bb.0: @@ -689,8 +661,6 @@ define <8 x i32> @vpmerge_vi_v8i32(<8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.merge.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vpmerge_vv_v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i32: ; CHECK: # %bb.0: @@ -724,8 +694,6 @@ define <16 x i32> @vpmerge_vi_v16i32(<16 x i32> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.merge.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vpmerge_vv_v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vpmerge_vi_v2i64(<2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.merge.v4i64(<4 x i1>, <4 x i64>, <4 x 
i64>, i32) - define <4 x i64> @vpmerge_vv_v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i64: ; CHECK: # %bb.0: @@ -866,8 +832,6 @@ define <4 x i64> @vpmerge_vi_v4i64(<4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.merge.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vpmerge_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i64: ; CHECK: # %bb.0: @@ -937,8 +901,6 @@ define <8 x i64> @vpmerge_vi_v8i64(<8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.merge.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32) - define <16 x i64> @vpmerge_vv_v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i64: ; CHECK: # %bb.0: @@ -1008,8 +970,6 @@ define <16 x i64> @vpmerge_vi_v16i64(<16 x i64> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i64> %v } -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vpmerge_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f16: ; CHECK: # %bb.0: @@ -1042,8 +1002,6 @@ define <2 x half> @vpmerge_vf_v2f16(half %a, <2 x half> %vb, <2 x i1> %m, i32 ze ret <2 x half> %v } -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vpmerge_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f16: ; CHECK: # %bb.0: @@ -1076,8 +1034,6 @@ define <4 x half> @vpmerge_vf_v4f16(half %a, <4 x half> %vb, <4 x i1> %m, i32 ze ret <4 x half> %v } -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vpmerge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f16: ; CHECK: # %bb.0: @@ -1110,8 +1066,6 @@ define <8 x half> 
@vpmerge_vf_v8f16(half %a, <8 x half> %vb, <8 x i1> %m, i32 ze ret <8 x half> %v } -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vpmerge_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f16: ; CHECK: # %bb.0: @@ -1144,8 +1098,6 @@ define <16 x half> @vpmerge_vf_v16f16(half %a, <16 x half> %vb, <16 x i1> %m, i3 ret <16 x half> %v } -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vpmerge_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f32: ; CHECK: # %bb.0: @@ -1169,8 +1121,6 @@ define <2 x float> @vpmerge_vf_v2f32(float %a, <2 x float> %vb, <2 x i1> %m, i32 ret <2 x float> %v } -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vpmerge_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f32: ; CHECK: # %bb.0: @@ -1194,8 +1144,6 @@ define <4 x float> @vpmerge_vf_v4f32(float %a, <4 x float> %vb, <4 x i1> %m, i32 ret <4 x float> %v } -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vpmerge_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f32: ; CHECK: # %bb.0: @@ -1219,8 +1167,6 @@ define <8 x float> @vpmerge_vf_v8f32(float %a, <8 x float> %vb, <8 x i1> %m, i32 ret <8 x float> %v } -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vpmerge_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f32: ; CHECK: # %bb.0: @@ -1244,8 +1190,6 @@ define <16 x float> @vpmerge_vf_v16f32(float %a, <16 x float> %vb, <16 x i1> %m, ret <16 x float> %v } -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x 
double>, i32) - define <2 x double> @vpmerge_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f64: ; CHECK: # %bb.0: @@ -1269,8 +1213,6 @@ define <2 x double> @vpmerge_vf_v2f64(double %a, <2 x double> %vb, <2 x i1> %m, ret <2 x double> %v } -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vpmerge_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f64: ; CHECK: # %bb.0: @@ -1294,8 +1236,6 @@ define <4 x double> @vpmerge_vf_v4f64(double %a, <4 x double> %vb, <4 x i1> %m, ret <4 x double> %v } -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vpmerge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f64: ; CHECK: # %bb.0: @@ -1319,8 +1259,6 @@ define <8 x double> @vpmerge_vf_v8f64(double %a, <8 x double> %vb, <8 x i1> %m, ret <8 x double> %v } -declare <16 x double> @llvm.vp.merge.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32) - define <16 x double> @vpmerge_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f64: ; CHECK: # %bb.0: @@ -1344,8 +1282,6 @@ define <16 x double> @vpmerge_vf_v16f64(double %a, <16 x double> %vb, <16 x i1> ret <16 x double> %v } -declare <32 x double> @llvm.vp.merge.v32f64(<32 x i1>, <32 x double>, <32 x double>, i32) - define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll index c361ccce14e4a..b4d20d93f2a1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -8,8 +8,6 @@ ; RUN: llc 
-mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.vp.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i8(<2 x i8> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i8: ; RV32: # %bb.0: @@ -101,8 +99,6 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.vp.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i8: ; RV32: # %bb.0: @@ -135,8 +131,6 @@ define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, i32 zeroext ret void } -declare void @llvm.vp.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i8(<8 x i8> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i8: ; RV32: # %bb.0: @@ -174,8 +168,6 @@ define void @vpscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8 ret void } -declare void @llvm.vp.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i16(<2 x i16> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i16: ; RV32: # %bb.0: @@ -238,8 +230,6 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, < ret void } -declare void @llvm.vp.scatter.v3i16.v3p0(<3 x i16>, <3 x ptr>, <3 x i1>, i32) - define void @vpscatter_v3i16(<3 x i16> %val, <3 x ptr> %ptrs, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v3i16: ; RV32: # %bb.0: @@ -272,8 +262,6 @@ define void @vpscatter_truemask_v3i16(<3 x i16> %val, <3 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i16: ; 
RV32: # %bb.0: @@ -306,8 +294,6 @@ define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i16(<8 x i16> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i16: ; RV32: # %bb.0: @@ -415,8 +401,6 @@ define void @vpscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.vp.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i32(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i32: ; RV32: # %bb.0: @@ -454,8 +438,6 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, < ret void } -declare void @llvm.vp.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i32: ; RV32: # %bb.0: @@ -488,8 +470,6 @@ define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i32(<8 x i32> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i32: ; RV32: # %bb.0: @@ -670,8 +650,6 @@ define void @vpscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs, ret void } -declare void @llvm.vp.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64: ; RV32: # %bb.0: @@ -688,8 +666,6 @@ define void @vpscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 z ret void } -declare void @llvm.vp.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vpscatter_v4i64: ; RV32: # %bb.0: @@ -722,8 +698,6 @@ define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i64: ; RV32: # %bb.0: @@ -972,8 +946,6 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs, ret void } -declare void @llvm.vp.scatter.v2bf16.v2p0(<2 x bfloat>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2bf16: ; RV32: # %bb.0: @@ -990,8 +962,6 @@ define void @vpscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m, i ret void } -declare void @llvm.vp.scatter.v4bf16.v4p0(<4 x bfloat>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4bf16: ; RV32: # %bb.0: @@ -1024,8 +994,6 @@ define void @vpscatter_truemask_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, i32 z ret void } -declare void @llvm.vp.scatter.v8bf16.v8p0(<8 x bfloat>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8bf16(<8 x bfloat> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8bf16: ; RV32: # %bb.0: @@ -1042,8 +1010,6 @@ define void @vpscatter_v8bf16(<8 x bfloat> %val, <8 x ptr> %ptrs, <8 x i1> %m, i ret void } -declare void @llvm.vp.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f16: ; RV32: # %bb.0: @@ -1060,8 +1026,6 @@ define void @vpscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 ret void } -declare void @llvm.vp.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4f16(<4 x half> %val, <4 x ptr> 
%ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f16: ; RV32: # %bb.0: @@ -1094,8 +1058,6 @@ define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs, i32 zero ret void } -declare void @llvm.vp.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f16(<8 x half> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f16: ; RV32: # %bb.0: @@ -1203,8 +1165,6 @@ define void @vpscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs ret void } -declare void @llvm.vp.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f32: ; RV32: # %bb.0: @@ -1221,8 +1181,6 @@ define void @vpscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 ret void } -declare void @llvm.vp.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4f32(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f32: ; RV32: # %bb.0: @@ -1255,8 +1213,6 @@ define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs, i32 zer ret void } -declare void @llvm.vp.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f32: ; RV32: # %bb.0: @@ -1437,8 +1393,6 @@ define void @vpscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idx ret void } -declare void @llvm.vp.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f64: ; RV32: # %bb.0: @@ -1455,8 +1409,6 @@ define void @vpscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m, i3 ret void } -declare void @llvm.vp.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, <4 x i1>, i32) - 
define void @vpscatter_v4f64(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f64: ; RV32: # %bb.0: @@ -1489,8 +1441,6 @@ define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs, i32 ze ret void } -declare void @llvm.vp.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f64(<8 x double> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f64: ; RV32: # %bb.0: @@ -1739,8 +1689,6 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %id ret void } -declare void @llvm.vp.scatter.v32f64.v32p0(<32 x double>, <32 x ptr>, <32 x i1>, i32) - define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll index d30e8b46e6df2..855a87d21b7dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.vp.store.v2i8.p0(<2 x i8>, ptr, <2 x i1>, i32) - define void @vpstore_v2i8(<2 x i8> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i8: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define void @vpstore_v2i8(<2 x i8> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v4i8.p0(<4 x i8>, ptr, <4 x i1>, i32) - define void @vpstore_v4i8(<4 x i8> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i8: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define void @vpstore_v4i8(<4 x i8> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v8i7.v8i7.p0(<8 x i7>, ptr, <8 x i1>, i32) - define void @vpstore_v8i7(<8 x i7> %val, ptr %ptr, <8 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i7: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define void @vpstore_v8i7(<8 x i7> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v8i8.p0(<8 x i8>, ptr, <8 x i1>, i32) - define void @vpstore_v8i8(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i8: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define void @vpstore_v8i8(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v2i16.p0(<2 x i16>, ptr, <2 x i1>, i32) - define void @vpstore_v2i16(<2 x i16> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i16: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define void @vpstore_v2i16(<2 x i16> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i16.p0(<4 x i16>, ptr, <4 x i1>, i32) - define void @vpstore_v4i16(<4 x i16> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i16: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define void @vpstore_v4i16(<4 x i16> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i16.p0(<8 x i16>, ptr, <8 x i1>, i32) - define void @vpstore_v8i16(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i16: ; CHECK: # %bb.0: @@ -88,8 +74,6 @@ define void @vpstore_v8i16(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2i32.p0(<2 x i32>, ptr, <2 x i1>, i32) - define void @vpstore_v2i32(<2 x i32> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i32: ; CHECK: # %bb.0: @@ -100,8 +84,6 @@ define void @vpstore_v2i32(<2 x i32> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i32.p0(<4 x i32>, ptr, <4 x i1>, i32) - define void @vpstore_v4i32(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i32: ; CHECK: # %bb.0: @@ -112,8 +94,6 @@ define void 
@vpstore_v4i32(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i32.p0(<8 x i32>, ptr, <8 x i1>, i32) - define void @vpstore_v8i32(<8 x i32> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i32: ; CHECK: # %bb.0: @@ -124,8 +104,6 @@ define void @vpstore_v8i32(<8 x i32> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2i64.p0(<2 x i64>, ptr, <2 x i1>, i32) - define void @vpstore_v2i64(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i64: ; CHECK: # %bb.0: @@ -136,8 +114,6 @@ define void @vpstore_v2i64(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i64.p0(<4 x i64>, ptr, <4 x i1>, i32) - define void @vpstore_v4i64(<4 x i64> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i64: ; CHECK: # %bb.0: @@ -148,8 +124,6 @@ define void @vpstore_v4i64(<4 x i64> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i64.p0(<8 x i64>, ptr, <8 x i1>, i32) - define void @vpstore_v8i64(<8 x i64> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i64: ; CHECK: # %bb.0: @@ -160,8 +134,6 @@ define void @vpstore_v8i64(<8 x i64> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2f16.p0(<2 x half>, ptr, <2 x i1>, i32) - define void @vpstore_v2f16(<2 x half> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f16: ; CHECK: # %bb.0: @@ -172,8 +144,6 @@ define void @vpstore_v2f16(<2 x half> %val, ptr %ptr, <2 x i1> %m, i32 zeroext % ret void } -declare void @llvm.vp.store.v4f16.p0(<4 x half>, ptr, <4 x i1>, i32) - define void @vpstore_v4f16(<4 x half> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f16: ; CHECK: # %bb.0: @@ -184,8 +154,6 @@ define void @vpstore_v4f16(<4 x half> %val, ptr %ptr, <4 x i1> %m, i32 zeroext % ret void } -declare void 
@llvm.vp.store.v8f16.p0(<8 x half>, ptr, <8 x i1>, i32) - define void @vpstore_v8f16(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f16: ; CHECK: # %bb.0: @@ -196,8 +164,6 @@ define void @vpstore_v8f16(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext % ret void } -declare void @llvm.vp.store.v2f32.p0(<2 x float>, ptr, <2 x i1>, i32) - define void @vpstore_v2f32(<2 x float> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f32: ; CHECK: # %bb.0: @@ -208,8 +174,6 @@ define void @vpstore_v2f32(<2 x float> %val, ptr %ptr, <2 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v4f32.p0(<4 x float>, ptr, <4 x i1>, i32) - define void @vpstore_v4f32(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f32: ; CHECK: # %bb.0: @@ -220,8 +184,6 @@ define void @vpstore_v4f32(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v6f32.p0(<6 x float>, ptr, <6 x i1>, i32) - define void @vpstore_v6f32(<6 x float> %val, ptr %ptr, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v6f32: ; CHECK: # %bb.0: @@ -232,8 +194,6 @@ define void @vpstore_v6f32(<6 x float> %val, ptr %ptr, <6 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v8f32.p0(<8 x float>, ptr, <8 x i1>, i32) - define void @vpstore_v8f32(<8 x float> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f32: ; CHECK: # %bb.0: @@ -244,8 +204,6 @@ define void @vpstore_v8f32(<8 x float> %val, ptr %ptr, <8 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v2f64.p0(<2 x double>, ptr, <2 x i1>, i32) - define void @vpstore_v2f64(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f64: ; CHECK: # %bb.0: @@ -256,8 +214,6 @@ define void @vpstore_v2f64(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v4f64.p0(<4 x double>, ptr, <4 x i1>, i32) - define void 
@vpstore_v4f64(<4 x double> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f64: ; CHECK: # %bb.0: @@ -268,8 +224,6 @@ define void @vpstore_v4f64(<4 x double> %val, ptr %ptr, <4 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v8f64.p0(<8 x double>, ptr, <8 x i1>, i32) - define void @vpstore_v8f64(<8 x double> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f64: ; CHECK: # %bb.0: @@ -290,8 +244,6 @@ define void @vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v32f64.p0(<32 x double>, ptr, <32 x i1>, i32) - define void @vpstore_v32f64(<32 x double> %val, ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll index 0d31ec5f78435..74a958f40f35b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vector.reduce.or.v1i1(<1 x i1>) - define zeroext i1 @vreduce_or_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_or_v1i1: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define zeroext i1 @vreduce_or_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>) - define zeroext i1 @vreduce_xor_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v1i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define zeroext i1 @vreduce_xor_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v1i1(<1 x i1>) - define zeroext i1 @vreduce_and_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_and_v1i1: ; CHECK: # %bb.0: @@ -41,8 +35,6 @@ define zeroext i1 @vreduce_and_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 
@llvm.vector.reduce.umax.v1i1(<1 x i1>) - define zeroext i1 @vreduce_umax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v1i1: ; CHECK: # %bb.0: @@ -54,8 +46,6 @@ define zeroext i1 @vreduce_umax_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v1i1(<1 x i1>) - define zeroext i1 @vreduce_smax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v1i1: ; CHECK: # %bb.0: @@ -67,8 +57,6 @@ define zeroext i1 @vreduce_smax_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v1i1(<1 x i1>) - define zeroext i1 @vreduce_umin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v1i1: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define zeroext i1 @vreduce_umin_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v1i1(<1 x i1>) - define zeroext i1 @vreduce_smin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v1i1: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define zeroext i1 @vreduce_smin_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v2i1(<2 x i1>) - define zeroext i1 @vreduce_or_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_or_v2i1: ; CHECK: # %bb.0: @@ -106,8 +90,6 @@ define zeroext i1 @vreduce_or_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>) - define zeroext i1 @vreduce_xor_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v2i1: ; CHECK: # %bb.0: @@ -119,8 +101,6 @@ define zeroext i1 @vreduce_xor_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>) - define zeroext i1 @vreduce_and_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_and_v2i1: ; CHECK: # %bb.0: @@ -133,8 +113,6 @@ define zeroext i1 @vreduce_and_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v2i1(<2 x i1>) - define zeroext i1 @vreduce_umax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v2i1: ; CHECK: # %bb.0: @@ -146,8 +124,6 @@ define zeroext i1 @vreduce_umax_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v2i1(<2 x i1>) - define zeroext i1 
@vreduce_smax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v2i1: ; CHECK: # %bb.0: @@ -160,8 +136,6 @@ define zeroext i1 @vreduce_smax_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v2i1(<2 x i1>) - define zeroext i1 @vreduce_umin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v2i1: ; CHECK: # %bb.0: @@ -174,8 +148,6 @@ define zeroext i1 @vreduce_umin_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v2i1(<2 x i1>) - define zeroext i1 @vreduce_smin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v2i1: ; CHECK: # %bb.0: @@ -187,8 +159,6 @@ define zeroext i1 @vreduce_smin_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v4i1(<4 x i1>) - define zeroext i1 @vreduce_or_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_or_v4i1: ; CHECK: # %bb.0: @@ -200,8 +170,6 @@ define zeroext i1 @vreduce_or_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>) - define zeroext i1 @vreduce_xor_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v4i1: ; CHECK: # %bb.0: @@ -213,8 +181,6 @@ define zeroext i1 @vreduce_xor_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>) - define zeroext i1 @vreduce_and_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_and_v4i1: ; CHECK: # %bb.0: @@ -227,8 +193,6 @@ define zeroext i1 @vreduce_and_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v4i1(<4 x i1>) - define zeroext i1 @vreduce_umax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v4i1: ; CHECK: # %bb.0: @@ -240,8 +204,6 @@ define zeroext i1 @vreduce_umax_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v4i1(<4 x i1>) - define zeroext i1 @vreduce_smax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v4i1: ; CHECK: # %bb.0: @@ -254,8 +216,6 @@ define zeroext i1 @vreduce_smax_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v4i1(<4 x i1>) - define zeroext i1 @vreduce_umin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: 
vreduce_umin_v4i1: ; CHECK: # %bb.0: @@ -268,8 +228,6 @@ define zeroext i1 @vreduce_umin_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v4i1(<4 x i1>) - define zeroext i1 @vreduce_smin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v4i1: ; CHECK: # %bb.0: @@ -281,8 +239,6 @@ define zeroext i1 @vreduce_smin_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v8i1(<8 x i1>) - define zeroext i1 @vreduce_or_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_or_v8i1: ; CHECK: # %bb.0: @@ -294,8 +250,6 @@ define zeroext i1 @vreduce_or_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>) - define zeroext i1 @vreduce_xor_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v8i1: ; CHECK: # %bb.0: @@ -307,8 +261,6 @@ define zeroext i1 @vreduce_xor_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v8i1(<8 x i1>) - define zeroext i1 @vreduce_and_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_and_v8i1: ; CHECK: # %bb.0: @@ -321,8 +273,6 @@ define zeroext i1 @vreduce_and_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v8i1(<8 x i1>) - define zeroext i1 @vreduce_umax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v8i1: ; CHECK: # %bb.0: @@ -334,8 +284,6 @@ define zeroext i1 @vreduce_umax_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v8i1(<8 x i1>) - define zeroext i1 @vreduce_smax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v8i1: ; CHECK: # %bb.0: @@ -348,8 +296,6 @@ define zeroext i1 @vreduce_smax_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v8i1(<8 x i1>) - define zeroext i1 @vreduce_umin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v8i1: ; CHECK: # %bb.0: @@ -362,8 +308,6 @@ define zeroext i1 @vreduce_umin_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v8i1(<8 x i1>) - define zeroext i1 @vreduce_smin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v8i1: ; CHECK: # %bb.0: @@ -375,8 +319,6 @@ define 
zeroext i1 @vreduce_smin_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v16i1(<16 x i1>) - define zeroext i1 @vreduce_or_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_or_v16i1: ; CHECK: # %bb.0: @@ -388,8 +330,6 @@ define zeroext i1 @vreduce_or_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>) - define zeroext i1 @vreduce_xor_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v16i1: ; CHECK: # %bb.0: @@ -401,8 +341,6 @@ define zeroext i1 @vreduce_xor_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v16i1(<16 x i1>) - define zeroext i1 @vreduce_and_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_and_v16i1: ; CHECK: # %bb.0: @@ -415,8 +353,6 @@ define zeroext i1 @vreduce_and_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v16i1(<16 x i1>) - define zeroext i1 @vreduce_umax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v16i1: ; CHECK: # %bb.0: @@ -428,8 +364,6 @@ define zeroext i1 @vreduce_umax_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v16i1(<16 x i1>) - define zeroext i1 @vreduce_smax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v16i1: ; CHECK: # %bb.0: @@ -442,8 +376,6 @@ define zeroext i1 @vreduce_smax_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v16i1(<16 x i1>) - define zeroext i1 @vreduce_umin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v16i1: ; CHECK: # %bb.0: @@ -456,8 +388,6 @@ define zeroext i1 @vreduce_umin_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v16i1(<16 x i1>) - define zeroext i1 @vreduce_smin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v16i1: ; CHECK: # %bb.0: @@ -469,8 +399,6 @@ define zeroext i1 @vreduce_smin_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v32i1(<32 x i1>) - define zeroext i1 @vreduce_or_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_or_v32i1: ; CHECK: # %bb.0: @@ -483,8 +411,6 @@ define zeroext i1 
@vreduce_or_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v32i1(<32 x i1>) - define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v32i1: ; CHECK: # %bb.0: @@ -497,8 +423,6 @@ define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v32i1(<32 x i1>) - define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_and_v32i1: ; CHECK: # %bb.0: @@ -512,8 +436,6 @@ define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v32i1(<32 x i1>) - define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v32i1: ; CHECK: # %bb.0: @@ -526,8 +448,6 @@ define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v32i1(<32 x i1>) - define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v32i1: ; CHECK: # %bb.0: @@ -541,8 +461,6 @@ define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v32i1(<32 x i1>) - define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v32i1: ; CHECK: # %bb.0: @@ -556,8 +474,6 @@ define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v32i1(<32 x i1>) - define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v32i1: ; CHECK: # %bb.0: @@ -570,8 +486,6 @@ define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v64i1(<64 x i1>) - define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_or_v64i1: ; CHECK: # %bb.0: @@ -584,8 +498,6 @@ define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v64i1(<64 x i1>) - define zeroext i1 @vreduce_xor_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v64i1: ; CHECK: # %bb.0: @@ -598,8 +510,6 @@ define zeroext i1 
@vreduce_xor_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v64i1(<64 x i1>) - define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_and_v64i1: ; CHECK: # %bb.0: @@ -613,8 +523,6 @@ define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v64i1(<64 x i1>) - define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v64i1: ; CHECK: # %bb.0: @@ -627,8 +535,6 @@ define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v64i1(<64 x i1>) - define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v64i1: ; CHECK: # %bb.0: @@ -642,8 +548,6 @@ define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v64i1(<64 x i1>) - define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v64i1: ; CHECK: # %bb.0: @@ -657,8 +561,6 @@ define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v64i1(<64 x i1>) - define zeroext i1 @vreduce_smin_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v64i1: ; CHECK: # %bb.0: @@ -671,8 +573,6 @@ define zeroext i1 @vreduce_smin_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v1i1(<1 x i1>) - define zeroext i1 @vreduce_add_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_add_v1i1: ; CHECK: # %bb.0: @@ -684,8 +584,6 @@ define zeroext i1 @vreduce_add_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v2i1(<2 x i1>) - define zeroext i1 @vreduce_add_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_add_v2i1: ; CHECK: # %bb.0: @@ -697,8 +595,6 @@ define zeroext i1 @vreduce_add_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v4i1(<4 x i1>) - define zeroext i1 @vreduce_add_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_add_v4i1: ; CHECK: # %bb.0: @@ -710,8 +606,6 @@ define zeroext i1 @vreduce_add_v4i1(<4 x i1> 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v8i1(<8 x i1>) - define zeroext i1 @vreduce_add_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_add_v8i1: ; CHECK: # %bb.0: @@ -723,8 +617,6 @@ define zeroext i1 @vreduce_add_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v16i1(<16 x i1>) - define zeroext i1 @vreduce_add_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_add_v16i1: ; CHECK: # %bb.0: @@ -736,8 +628,6 @@ define zeroext i1 @vreduce_add_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v32i1(<32 x i1>) - define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_add_v32i1: ; CHECK: # %bb.0: @@ -750,8 +640,6 @@ define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v64i1(<64 x i1>) - define zeroext i1 @vreduce_add_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_add_v64i1: ; CHECK: # %bb.0: @@ -764,8 +652,6 @@ define zeroext i1 @vreduce_add_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v128i1(<128 x i1>) - define zeroext i1 @vreduce_or_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_or_v128i1: ; CHECK: # %bb.0: @@ -778,8 +664,6 @@ define zeroext i1 @vreduce_or_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v128i1(<128 x i1>) - define zeroext i1 @vreduce_xor_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v128i1: ; CHECK: # %bb.0: @@ -792,8 +676,6 @@ define zeroext i1 @vreduce_xor_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v128i1(<128 x i1>) - define zeroext i1 @vreduce_and_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_and_v128i1: ; CHECK: # %bb.0: @@ -807,8 +689,6 @@ define zeroext i1 @vreduce_and_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v128i1(<128 x i1>) - define zeroext i1 @vreduce_umax_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v128i1: ; CHECK: # %bb.0: @@ -821,8 +701,6 @@ define zeroext i1 @vreduce_umax_v128i1(<128 x i1> 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v128i1(<128 x i1>) - define zeroext i1 @vreduce_smax_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v128i1: ; CHECK: # %bb.0: @@ -836,8 +714,6 @@ define zeroext i1 @vreduce_smax_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v128i1(<128 x i1>) - define zeroext i1 @vreduce_umin_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v128i1: ; CHECK: # %bb.0: @@ -851,8 +727,6 @@ define zeroext i1 @vreduce_umin_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v128i1(<128 x i1>) - define zeroext i1 @vreduce_smin_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v128i1: ; CHECK: # %bb.0: @@ -865,8 +739,6 @@ define zeroext i1 @vreduce_smin_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v256i1(<256 x i1>) - define zeroext i1 @vreduce_or_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_or_v256i1: ; CHECK: # %bb.0: @@ -880,8 +752,6 @@ define zeroext i1 @vreduce_or_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v256i1(<256 x i1>) - define zeroext i1 @vreduce_xor_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v256i1: ; CHECK: # %bb.0: @@ -895,8 +765,6 @@ define zeroext i1 @vreduce_xor_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v256i1(<256 x i1>) - define zeroext i1 @vreduce_and_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_and_v256i1: ; CHECK: # %bb.0: @@ -910,8 +778,6 @@ define zeroext i1 @vreduce_and_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v256i1(<256 x i1>) - define zeroext i1 @vreduce_umax_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v256i1: ; CHECK: # %bb.0: @@ -925,8 +791,6 @@ define zeroext i1 @vreduce_umax_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v256i1(<256 x i1>) - define zeroext i1 @vreduce_smax_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v256i1: ; CHECK: # %bb.0: @@ -940,8 +804,6 @@ 
define zeroext i1 @vreduce_smax_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v256i1(<256 x i1>) - define zeroext i1 @vreduce_umin_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v256i1: ; CHECK: # %bb.0: @@ -955,8 +817,6 @@ define zeroext i1 @vreduce_umin_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v256i1(<256 x i1>) - define zeroext i1 @vreduce_smin_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v256i1: ; CHECK: # %bb.0: @@ -970,8 +830,6 @@ define zeroext i1 @vreduce_smin_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v512i1(<512 x i1>) - define zeroext i1 @vreduce_or_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_or_v512i1: ; CHECK: # %bb.0: @@ -987,8 +845,6 @@ define zeroext i1 @vreduce_or_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v512i1(<512 x i1>) - define zeroext i1 @vreduce_xor_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v512i1: ; CHECK: # %bb.0: @@ -1004,8 +860,6 @@ define zeroext i1 @vreduce_xor_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v512i1(<512 x i1>) - define zeroext i1 @vreduce_and_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_and_v512i1: ; CHECK: # %bb.0: @@ -1021,8 +875,6 @@ define zeroext i1 @vreduce_and_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v512i1(<512 x i1>) - define zeroext i1 @vreduce_umax_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v512i1: ; CHECK: # %bb.0: @@ -1038,8 +890,6 @@ define zeroext i1 @vreduce_umax_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v512i1(<512 x i1>) - define zeroext i1 @vreduce_smax_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v512i1: ; CHECK: # %bb.0: @@ -1055,8 +905,6 @@ define zeroext i1 @vreduce_smax_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v512i1(<512 x i1>) - define zeroext i1 @vreduce_umin_v512i1(<512 x i1> %v) { ; CHECK-LABEL: 
vreduce_umin_v512i1: ; CHECK: # %bb.0: @@ -1072,8 +920,6 @@ define zeroext i1 @vreduce_umin_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v512i1(<512 x i1>) - define zeroext i1 @vreduce_smin_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v512i1: ; CHECK: # %bb.0: @@ -1089,8 +935,6 @@ define zeroext i1 @vreduce_smin_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_or_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_or_v1024i1: ; CHECK: # %bb.0: @@ -1110,8 +954,6 @@ define zeroext i1 @vreduce_or_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_xor_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v1024i1: ; CHECK: # %bb.0: @@ -1131,8 +973,6 @@ define zeroext i1 @vreduce_xor_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_and_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_and_v1024i1: ; CHECK: # %bb.0: @@ -1152,8 +992,6 @@ define zeroext i1 @vreduce_and_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_umax_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v1024i1: ; CHECK: # %bb.0: @@ -1173,8 +1011,6 @@ define zeroext i1 @vreduce_umax_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_smax_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v1024i1: ; CHECK: # %bb.0: @@ -1194,8 +1030,6 @@ define zeroext i1 @vreduce_smax_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_umin_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v1024i1: ; CHECK: # %bb.0: @@ -1215,8 +1049,6 @@ define zeroext i1 @vreduce_umin_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 
@llvm.vector.reduce.smin.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_smin_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v1024i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll index b3d35a51280ac..78eabfec4153e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.srem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.srem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vrem_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.srem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vrem_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i8: ; CHECK: # %bb.0: @@ -112,8 +106,6 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.srem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v6i8: ; CHECK: # %bb.0: @@ -124,8 +116,6 @@ define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroex ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.srem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vrem_vv_v8i8(<8 x 
i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i8: ; CHECK: # %bb.0: @@ -170,8 +160,6 @@ define <8 x i8> @vrem_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.srem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vrem_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i8: ; CHECK: # %bb.0: @@ -216,8 +204,6 @@ define <16 x i8> @vrem_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.srem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vrem_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i16: ; CHECK: # %bb.0: @@ -262,8 +248,6 @@ define <2 x i16> @vrem_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.srem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vrem_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i16: ; CHECK: # %bb.0: @@ -308,8 +292,6 @@ define <4 x i16> @vrem_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.srem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vrem_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i16: ; CHECK: # %bb.0: @@ -354,8 +336,6 @@ define <8 x i16> @vrem_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.srem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vrem_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i16: ; CHECK: # %bb.0: @@ -400,8 +380,6 @@ define <16 x i16> @vrem_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.srem.v2i32(<2 x i32>, <2 x i32>, <2 x 
i1>, i32) - define <2 x i32> @vrem_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i32: ; CHECK: # %bb.0: @@ -446,8 +424,6 @@ define <2 x i32> @vrem_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vrem_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i32: ; CHECK: # %bb.0: @@ -492,8 +468,6 @@ define <4 x i32> @vrem_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.srem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vrem_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i32: ; CHECK: # %bb.0: @@ -538,8 +512,6 @@ define <8 x i32> @vrem_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.srem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vrem_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i32: ; CHECK: # %bb.0: @@ -584,8 +556,6 @@ define <16 x i32> @vrem_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.srem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vrem_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i64: ; CHECK: # %bb.0: @@ -660,8 +630,6 @@ define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.srem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vrem_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i64: ; CHECK: # %bb.0: @@ -736,8 +704,6 @@ define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x 
i64> @llvm.vp.srem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vrem_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i64: ; CHECK: # %bb.0: @@ -812,8 +778,6 @@ define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.srem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vrem_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i64: ; CHECK: # %bb.0: @@ -888,9 +852,6 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.srem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vrem_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -911,8 +872,6 @@ define <3 x i8> @vrem_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.srem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vrem_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll index 2a453e3a39c2f..7ba66d61b13f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.urem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> 
@llvm.vp.urem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vremu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.urem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vremu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i8: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.urem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v6i8: ; CHECK: # %bb.0: @@ -123,8 +115,6 @@ define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroe ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.urem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i8: ; CHECK: # %bb.0: @@ -169,8 +159,6 @@ define <8 x i8> @vremu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.urem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vremu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i8: ; CHECK: # %bb.0: @@ -215,8 +203,6 @@ define <16 x i8> @vremu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.urem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vremu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i16: ; CHECK: # %bb.0: @@ -261,8 +247,6 @@ define <2 x i16> @vremu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } 
-declare <4 x i16> @llvm.vp.urem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vremu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i16: ; CHECK: # %bb.0: @@ -307,8 +291,6 @@ define <4 x i16> @vremu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.urem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vremu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i16: ; CHECK: # %bb.0: @@ -353,8 +335,6 @@ define <8 x i16> @vremu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.urem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vremu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i16: ; CHECK: # %bb.0: @@ -399,8 +379,6 @@ define <16 x i16> @vremu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.urem.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vremu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i32: ; CHECK: # %bb.0: @@ -445,8 +423,6 @@ define <2 x i32> @vremu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vremu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i32: ; CHECK: # %bb.0: @@ -491,8 +467,6 @@ define <4 x i32> @vremu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.urem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vremu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i32: ; CHECK: # %bb.0: @@ -537,8 +511,6 @@ define <8 x i32> 
@vremu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.urem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vremu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i32: ; CHECK: # %bb.0: @@ -583,8 +555,6 @@ define <16 x i32> @vremu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.urem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vremu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i64: ; CHECK: # %bb.0: @@ -659,8 +629,6 @@ define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.urem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vremu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i64: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.urem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vremu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i64: ; CHECK: # %bb.0: @@ -811,8 +777,6 @@ define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.urem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vremu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i64: ; CHECK: # %bb.0: @@ -887,9 +851,6 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.urem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vremu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vremu_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -910,8 +871,6 @@ define <3 x i8> @vremu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.urem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vremu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll index 32ae81926bbee..9e0b04d7f09ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare <1 x i8> @llvm.fshl.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) - define <1 x i8> @vrol_vv_v1i8(<1 x i8> %a, <1 x i8> %b) { ; CHECK-LABEL: vrol_vv_v1i8: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define <1 x i8> @vrol_vx_v1i8(<1 x i8> %a, i8 %b) { ret <1 x i8> %x } -declare <2 x i8> @llvm.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) - define <2 x i8> @vrol_vv_v2i8(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: vrol_vv_v2i8: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define <2 x i8> @vrol_vx_v2i8(<2 x i8> %a, i8 %b) { ret <2 x i8> %x } -declare <4 x i8> @llvm.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) - define <4 x i8> @vrol_vv_v4i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK-LABEL: vrol_vv_v4i8: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define <4 x i8> @vrol_vx_v4i8(<4 x i8> %a, i8 %b) { ret <4 x i8> %x } -declare <8 x i8> @llvm.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) - define <8 x i8> @vrol_vv_v8i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: vrol_vv_v8i8: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define <8 x i8> @vrol_vx_v8i8(<8 x i8> %a, i8 %b) { ret <8 x i8> %x } -declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) - 
define <16 x i8> @vrol_vv_v16i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: vrol_vv_v16i8: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define <16 x i8> @vrol_vx_v16i8(<16 x i8> %a, i8 %b) { ret <16 x i8> %x } -declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) - define <32 x i8> @vrol_vv_v32i8(<32 x i8> %a, <32 x i8> %b) { ; CHECK-LABEL: vrol_vv_v32i8: ; CHECK: # %bb.0: @@ -290,8 +278,6 @@ define <32 x i8> @vrol_vx_v32i8(<32 x i8> %a, i8 %b) { ret <32 x i8> %x } -declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) - define <64 x i8> @vrol_vv_v64i8(<64 x i8> %a, <64 x i8> %b) { ; CHECK-LABEL: vrol_vv_v64i8: ; CHECK: # %bb.0: @@ -341,8 +327,6 @@ define <64 x i8> @vrol_vx_v64i8(<64 x i8> %a, i8 %b) { ret <64 x i8> %x } -declare <1 x i16> @llvm.fshl.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) - define <1 x i16> @vrol_vv_v1i16(<1 x i16> %a, <1 x i16> %b) { ; CHECK-LABEL: vrol_vv_v1i16: ; CHECK: # %bb.0: @@ -388,8 +372,6 @@ define <1 x i16> @vrol_vx_v1i16(<1 x i16> %a, i16 %b) { ret <1 x i16> %x } -declare <2 x i16> @llvm.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) - define <2 x i16> @vrol_vv_v2i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: vrol_vv_v2i16: ; CHECK: # %bb.0: @@ -435,8 +417,6 @@ define <2 x i16> @vrol_vx_v2i16(<2 x i16> %a, i16 %b) { ret <2 x i16> %x } -declare <4 x i16> @llvm.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) - define <4 x i16> @vrol_vv_v4i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK-LABEL: vrol_vv_v4i16: ; CHECK: # %bb.0: @@ -482,8 +462,6 @@ define <4 x i16> @vrol_vx_v4i16(<4 x i16> %a, i16 %b) { ret <4 x i16> %x } -declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) - define <8 x i16> @vrol_vv_v8i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: vrol_vv_v8i16: ; CHECK: # %bb.0: @@ -529,8 +507,6 @@ define <8 x i16> @vrol_vx_v8i16(<8 x i16> %a, i16 %b) { ret <8 x i16> %x } -declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) - define <16 x i16> @vrol_vv_v16i16(<16 x i16> %a, <16 x i16> %b) { 
; CHECK-LABEL: vrol_vv_v16i16: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x i16> @vrol_vx_v16i16(<16 x i16> %a, i16 %b) { ret <16 x i16> %x } -declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) - define <32 x i16> @vrol_vv_v32i16(<32 x i16> %a, <32 x i16> %b) { ; CHECK-LABEL: vrol_vv_v32i16: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <32 x i16> @vrol_vx_v32i16(<32 x i16> %a, i16 %b) { ret <32 x i16> %x } -declare <1 x i32> @llvm.fshl.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) - define <1 x i32> @vrol_vv_v1i32(<1 x i32> %a, <1 x i32> %b) { ; CHECK-LABEL: vrol_vv_v1i32: ; CHECK: # %bb.0: @@ -676,8 +648,6 @@ define <1 x i32> @vrol_vx_v1i32(<1 x i32> %a, i32 %b) { ret <1 x i32> %x } -declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) - define <2 x i32> @vrol_vv_v2i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: vrol_vv_v2i32: ; CHECK: # %bb.0: @@ -725,8 +695,6 @@ define <2 x i32> @vrol_vx_v2i32(<2 x i32> %a, i32 %b) { ret <2 x i32> %x } -declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) - define <4 x i32> @vrol_vv_v4i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: vrol_vv_v4i32: ; CHECK: # %bb.0: @@ -774,8 +742,6 @@ define <4 x i32> @vrol_vx_v4i32(<4 x i32> %a, i32 %b) { ret <4 x i32> %x } -declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) - define <8 x i32> @vrol_vv_v8i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: vrol_vv_v8i32: ; CHECK: # %bb.0: @@ -823,8 +789,6 @@ define <8 x i32> @vrol_vx_v8i32(<8 x i32> %a, i32 %b) { ret <8 x i32> %x } -declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) - define <16 x i32> @vrol_vv_v16i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: vrol_vv_v16i32: ; CHECK: # %bb.0: @@ -872,8 +836,6 @@ define <16 x i32> @vrol_vx_v16i32(<16 x i32> %a, i32 %b) { ret <16 x i32> %x } -declare <1 x i64> @llvm.fshl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) - define <1 x i64> @vrol_vv_v1i64(<1 x i64> %a, <1 x i64> %b) { ; CHECK-LABEL: vrol_vv_v1i64: ; 
CHECK: # %bb.0: @@ -921,8 +883,6 @@ define <1 x i64> @vrol_vx_v1i64(<1 x i64> %a, i64 %b) { ret <1 x i64> %x } -declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) - define <2 x i64> @vrol_vv_v2i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: vrol_vv_v2i64: ; CHECK: # %bb.0: @@ -986,8 +946,6 @@ define <2 x i64> @vrol_vx_v2i64(<2 x i64> %a, i64 %b) { ret <2 x i64> %x } -declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) - define <4 x i64> @vrol_vv_v4i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: vrol_vv_v4i64: ; CHECK: # %bb.0: @@ -1051,8 +1009,6 @@ define <4 x i64> @vrol_vx_v4i64(<4 x i64> %a, i64 %b) { ret <4 x i64> %x } -declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) - define <8 x i64> @vrol_vv_v8i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK-LABEL: vrol_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll index ec22d2be1eaad..29aa04e2f308a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare <1 x i8> @llvm.fshr.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) -declare <1 x i8> @llvm.fshl.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) - define <1 x i8> @vror_vv_v1i8(<1 x i8> %a, <1 x i8> %b) { ; CHECK-LABEL: vror_vv_v1i8: ; CHECK: # %bb.0: @@ -88,9 +85,6 @@ define <1 x i8> @vror_vi_rotl_v1i8(<1 x i8> %a) { ret <1 x i8> %x } -declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) -declare <2 x i8> @llvm.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) - define <2 x i8> @vror_vv_v2i8(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: vror_vv_v2i8: ; CHECK: # %bb.0: @@ -172,9 +166,6 @@ define <2 x i8> @vror_vi_rotl_v2i8(<2 x i8> %a) { ret <2 x i8> 
%x } -declare <4 x i8> @llvm.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) - define <4 x i8> @vror_vv_v4i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK-LABEL: vror_vv_v4i8: ; CHECK: # %bb.0: @@ -256,9 +247,6 @@ define <4 x i8> @vror_vi_rotl_v4i8(<4 x i8> %a) { ret <4 x i8> %x } -declare <8 x i8> @llvm.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) - define <8 x i8> @vror_vv_v8i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: vror_vv_v8i8: ; CHECK: # %bb.0: @@ -340,9 +328,6 @@ define <8 x i8> @vror_vi_rotl_v8i8(<8 x i8> %a) { ret <8 x i8> %x } -declare <16 x i8> @llvm.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) -declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) - define <16 x i8> @vror_vv_v16i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: vror_vv_v16i8: ; CHECK: # %bb.0: @@ -424,9 +409,6 @@ define <16 x i8> @vror_vi_rotl_v16i8(<16 x i8> %a) { ret <16 x i8> %x } -declare <32 x i8> @llvm.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) -declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) - define <32 x i8> @vror_vv_v32i8(<32 x i8> %a, <32 x i8> %b) { ; CHECK-LABEL: vror_vv_v32i8: ; CHECK: # %bb.0: @@ -516,9 +498,6 @@ define <32 x i8> @vror_vi_rotl_v32i8(<32 x i8> %a) { ret <32 x i8> %x } -declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) -declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) - define <64 x i8> @vror_vv_v64i8(<64 x i8> %a, <64 x i8> %b) { ; CHECK-LABEL: vror_vv_v64i8: ; CHECK: # %bb.0: @@ -608,9 +587,6 @@ define <64 x i8> @vror_vi_rotl_v64i8(<64 x i8> %a) { ret <64 x i8> %x } -declare <1 x i16> @llvm.fshr.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) -declare <1 x i16> @llvm.fshl.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) - define <1 x i16> @vror_vv_v1i16(<1 x i16> %a, <1 x i16> %b) { ; CHECK-LABEL: vror_vv_v1i16: ; CHECK: # %bb.0: @@ -692,9 +668,6 @@ define <1 x i16> @vror_vi_rotl_v1i16(<1 x i16> %a) { 
ret <1 x i16> %x } -declare <2 x i16> @llvm.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) - define <2 x i16> @vror_vv_v2i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: vror_vv_v2i16: ; CHECK: # %bb.0: @@ -776,9 +749,6 @@ define <2 x i16> @vror_vi_rotl_v2i16(<2 x i16> %a) { ret <2 x i16> %x } -declare <4 x i16> @llvm.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) - define <4 x i16> @vror_vv_v4i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK-LABEL: vror_vv_v4i16: ; CHECK: # %bb.0: @@ -860,9 +830,6 @@ define <4 x i16> @vror_vi_rotl_v4i16(<4 x i16> %a) { ret <4 x i16> %x } -declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) -declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) - define <8 x i16> @vror_vv_v8i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: vror_vv_v8i16: ; CHECK: # %bb.0: @@ -944,9 +911,6 @@ define <8 x i16> @vror_vi_rotl_v8i16(<8 x i16> %a) { ret <8 x i16> %x } -declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) -declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) - define <16 x i16> @vror_vv_v16i16(<16 x i16> %a, <16 x i16> %b) { ; CHECK-LABEL: vror_vv_v16i16: ; CHECK: # %bb.0: @@ -1028,9 +992,6 @@ define <16 x i16> @vror_vi_rotl_v16i16(<16 x i16> %a) { ret <16 x i16> %x } -declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) -declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) - define <32 x i16> @vror_vv_v32i16(<32 x i16> %a, <32 x i16> %b) { ; CHECK-LABEL: vror_vv_v32i16: ; CHECK: # %bb.0: @@ -1120,9 +1081,6 @@ define <32 x i16> @vror_vi_rotl_v32i16(<32 x i16> %a) { ret <32 x i16> %x } -declare <1 x i32> @llvm.fshr.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) -declare <1 x i32> @llvm.fshl.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) - define <1 x i32> @vror_vv_v1i32(<1 x i32> %a, <1 x i32> %b) { ; CHECK-LABEL: 
vror_vv_v1i32: ; CHECK: # %bb.0: @@ -1206,9 +1164,6 @@ define <1 x i32> @vror_vi_rotl_v1i32(<1 x i32> %a) { ret <1 x i32> %x } -declare <2 x i32> @llvm.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) -declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) - define <2 x i32> @vror_vv_v2i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: vror_vv_v2i32: ; CHECK: # %bb.0: @@ -1292,9 +1247,6 @@ define <2 x i32> @vror_vi_rotl_v2i32(<2 x i32> %a) { ret <2 x i32> %x } -declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) - define <4 x i32> @vror_vv_v4i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: vror_vv_v4i32: ; CHECK: # %bb.0: @@ -1378,9 +1330,6 @@ define <4 x i32> @vror_vi_rotl_v4i32(<4 x i32> %a) { ret <4 x i32> %x } -declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) - define <8 x i32> @vror_vv_v8i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: vror_vv_v8i32: ; CHECK: # %bb.0: @@ -1464,9 +1413,6 @@ define <8 x i32> @vror_vi_rotl_v8i32(<8 x i32> %a) { ret <8 x i32> %x } -declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) - define <16 x i32> @vror_vv_v16i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: vror_vv_v16i32: ; CHECK: # %bb.0: @@ -1550,9 +1496,6 @@ define <16 x i32> @vror_vi_rotl_v16i32(<16 x i32> %a) { ret <16 x i32> %x } -declare <1 x i64> @llvm.fshr.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.fshl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) - define <1 x i64> @vror_vv_v1i64(<1 x i64> %a, <1 x i64> %b) { ; CHECK-LABEL: vror_vv_v1i64: ; CHECK: # %bb.0: @@ -1666,9 +1609,6 @@ define <1 x i64> @vror_vi_rotl_v1i64(<1 x i64> %a) { ret <1 x i64> %x } -declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x 
i64>) - define <2 x i64> @vror_vv_v2i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: vror_vv_v2i64: ; CHECK: # %bb.0: @@ -1802,9 +1742,6 @@ define <2 x i64> @vror_vi_rotl_v2i64(<2 x i64> %a) { ret <2 x i64> %x } -declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) - define <4 x i64> @vror_vv_v4i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: vror_vv_v4i64: ; CHECK: # %bb.0: @@ -1938,9 +1875,6 @@ define <4 x i64> @vror_vi_rotl_v4i64(<4 x i64> %a) { ret <4 x i64> %x } -declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) -declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) - define <8 x i64> @vror_vv_v8i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK-LABEL: vror_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll index a30eeeaa6690e..91eb28cffc94d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vrsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i8: ; CHECK: # %bb.0: @@ -50,8 +48,6 @@ define <2 x i8> @vrsub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vrsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define <4 x i8> @vrsub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.sub.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vrsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i8: ; CHECK: # %bb.0: @@ -142,8 +136,6 @@ define <8 x i8> @vrsub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sub.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vrsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i8: ; CHECK: # %bb.0: @@ -188,8 +180,6 @@ define <16 x i8> @vrsub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sub.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vrsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i16: ; CHECK: # %bb.0: @@ -234,8 +224,6 @@ define <2 x i16> @vrsub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sub.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vrsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i16: ; CHECK: # %bb.0: @@ -280,8 +268,6 @@ define <4 x i16> @vrsub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sub.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vrsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i16: ; CHECK: # %bb.0: @@ -326,8 +312,6 @@ define <8 x i16> @vrsub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sub.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vrsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i16: ; CHECK: # %bb.0: @@ -372,8 +356,6 @@ define <16 x i16> @vrsub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sub.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vrsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i32: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define <2 x i32> @vrsub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vrsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i32: ; CHECK: # %bb.0: @@ -464,8 +444,6 @@ define <4 x i32> @vrsub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vrsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i32: ; CHECK: # %bb.0: @@ -510,8 +488,6 @@ define <8 x i32> @vrsub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sub.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vrsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i32: ; CHECK: # %bb.0: @@ -556,8 +532,6 @@ define <16 x i32> @vrsub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sub.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vrsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v2i64: ; RV32: # %bb.0: @@ -632,8 +606,6 @@ define <2 x i64> @vrsub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sub.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vrsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v4i64: ; RV32: # %bb.0: @@ -708,8 +680,6 @@ define <4 x i64> @vrsub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sub.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vrsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 
zeroext %evl) { ; RV32-LABEL: vrsub_vx_v8i64: ; RV32: # %bb.0: @@ -784,8 +754,6 @@ define <8 x i64> @vrsub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sub.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vrsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v16i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll index 6ceb03c765fd7..acaa1e6fa002d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i7: ; CHECK: # %bb.0: @@ -25,8 +23,6 @@ define <8 x i7> @vsadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i8: ; CHECK: # %bb.0: @@ -91,8 +87,6 @@ define <2 x i8> @vsadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i8: ; CHECK: # %bb.0: @@ -169,8 +163,6 @@ define <4 x i8> @vsadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vsadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsadd_vv_v5i8: ; CHECK: # %bb.0: @@ -235,8 +227,6 @@ define <5 x i8> @vsadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i8: ; CHECK: # %bb.0: @@ -301,8 +291,6 @@ define <8 x i8> @vsadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i8: ; CHECK: # %bb.0: @@ -367,8 +355,6 @@ define <16 x i8> @vsadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_v258i8: ; CHECK: # %bb.0: @@ -446,8 +432,6 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i16: ; CHECK: # %bb.0: @@ -512,8 +496,6 @@ define <2 x i16> @vsadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i16: ; CHECK: # %bb.0: @@ -578,8 +560,6 @@ define <4 x i16> @vsadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i16: ; CHECK: # %bb.0: @@ -644,8 +624,6 @@ define <8 x i16> @vsadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i16: ; CHECK: # %bb.0: @@ -710,8 +688,6 @@ define <16 x i16> @vsadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i32: ; CHECK: # %bb.0: @@ -776,8 +752,6 @@ define <2 x i32> @vsadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i32: ; CHECK: # %bb.0: @@ -842,8 +816,6 @@ define <4 x i32> @vsadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i32: ; CHECK: # %bb.0: @@ -908,8 +880,6 @@ define <8 x i32> @vsadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i32: ; CHECK: # %bb.0: @@ -974,8 +944,6 @@ define <16 x i32> @vsadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, 
i32) - define <2 x i64> @vsadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i64: ; CHECK: # %bb.0: @@ -1070,8 +1038,6 @@ define <2 x i64> @vsadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i64: ; CHECK: # %bb.0: @@ -1166,8 +1132,6 @@ define <4 x i64> @vsadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i64: ; CHECK: # %bb.0: @@ -1262,8 +1226,6 @@ define <8 x i64> @vsadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i64: ; CHECK: # %bb.0: @@ -1360,8 +1322,6 @@ define <16 x i64> @vsadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll index 94c453b0edd26..105be4d87092a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @sadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: sadd_v2i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @sadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: sadd_v4i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @sadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: sadd_v8i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @sadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: sadd_v16i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @sadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: sadd_v2i16_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> @sadd_v4i16_vv(<4 
x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: sadd_v4i16_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @sadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: sadd_v8i16_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @sadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: sadd_v16i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @sadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: sadd_v2i32_vv: ; CHECK: # %bb.0: @@ -322,8 +304,6 @@ define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @sadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: sadd_v4i32_vv: ; CHECK: # %bb.0: @@ -356,8 +336,6 @@ define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @sadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: sadd_v8i32_vv: ; CHECK: # %bb.0: @@ -390,8 +368,6 @@ define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @sadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: sadd_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @sadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: sadd_v2i64_vv: ; CHECK: # %bb.0: @@ -472,8 +446,6 @@ define <2 x i64> @sadd_v2i64_vi(<2 x 
i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @sadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: sadd_v4i64_vv: ; CHECK: # %bb.0: @@ -520,8 +492,6 @@ define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @sadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: sadd_v8i64_vv: ; CHECK: # %bb.0: @@ -568,8 +538,6 @@ define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @sadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: sadd_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll index 2839efd40305b..9b3b8348d9b30 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.uadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsaddu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i7: ; CHECK: # %bb.0: @@ -21,8 +19,6 @@ define <8 x i7> @vsaddu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zero ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsaddu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i8: ; CHECK: # %bb.0: @@ -87,8 +83,6 @@ define <2 x i8> @vsaddu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsaddu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { 
; CHECK-LABEL: vsaddu_vv_v4i8: ; CHECK: # %bb.0: @@ -165,8 +159,6 @@ define <4 x i8> @vsaddu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vsaddu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v5i8: ; CHECK: # %bb.0: @@ -231,8 +223,6 @@ define <5 x i8> @vsaddu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsaddu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i8: ; CHECK: # %bb.0: @@ -297,8 +287,6 @@ define <8 x i8> @vsaddu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsaddu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i8: ; CHECK: # %bb.0: @@ -363,8 +351,6 @@ define <16 x i8> @vsaddu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_v258i8: ; CHECK: # %bb.0: @@ -442,8 +428,6 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsaddu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i16: ; CHECK: # %bb.0: @@ -508,8 +492,6 @@ define <2 x i16> @vsaddu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsaddu_vv_v4i16(<4 x i16> %va, <4 x 
i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i16: ; CHECK: # %bb.0: @@ -574,8 +556,6 @@ define <4 x i16> @vsaddu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsaddu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i16: ; CHECK: # %bb.0: @@ -640,8 +620,6 @@ define <8 x i16> @vsaddu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsaddu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i16: ; CHECK: # %bb.0: @@ -706,8 +684,6 @@ define <16 x i16> @vsaddu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsaddu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i32: ; CHECK: # %bb.0: @@ -772,8 +748,6 @@ define <2 x i32> @vsaddu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsaddu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i32: ; CHECK: # %bb.0: @@ -838,8 +812,6 @@ define <4 x i32> @vsaddu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsaddu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i32: ; CHECK: # %bb.0: @@ -904,8 +876,6 @@ define <8 x i32> @vsaddu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x 
i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsaddu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i32: ; CHECK: # %bb.0: @@ -970,8 +940,6 @@ define <16 x i32> @vsaddu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsaddu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i64: ; CHECK: # %bb.0: @@ -1066,8 +1034,6 @@ define <2 x i64> @vsaddu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsaddu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i64: ; CHECK: # %bb.0: @@ -1162,8 +1128,6 @@ define <4 x i64> @vsaddu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsaddu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i64: ; CHECK: # %bb.0: @@ -1258,8 +1222,6 @@ define <8 x i64> @vsaddu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsaddu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i64: ; CHECK: # %bb.0: @@ -1356,8 +1318,6 @@ define <16 x i64> @vsaddu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll index 42fa433830801..620c0e89db50d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @uadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: uadd_v2i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @uadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: uadd_v4i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @uadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: uadd_v8i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @uadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: uadd_v16i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @uadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: uadd_v2i16_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> 
@uadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: uadd_v4i16_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @uadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: uadd_v8i16_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @uadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: uadd_v16i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @uadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: uadd_v2i32_vv: ; CHECK: # %bb.0: @@ -322,8 +304,6 @@ define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @uadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: uadd_v4i32_vv: ; CHECK: # %bb.0: @@ -356,8 +336,6 @@ define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @uadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: uadd_v8i32_vv: ; CHECK: # %bb.0: @@ -390,8 +368,6 @@ define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @uadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: uadd_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @uadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: uadd_v2i64_vv: ; CHECK: # %bb.0: @@ -472,8 +446,6 @@ define <2 x i64> 
@uadd_v2i64_vi(<2 x i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @uadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: uadd_v4i64_vv: ; CHECK: # %bb.0: @@ -520,8 +492,6 @@ define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @uadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: uadd_v8i64_vv: ; CHECK: # %bb.0: @@ -568,8 +538,6 @@ define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @uadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: uadd_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll index 31ab6699d7c51..fe2a707c2d550 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+m,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x bfloat> @llvm.vp.select.v2bf16(<2 x i1>, <2 x bfloat>, <2 x bfloat>, i32) - define <2 x bfloat> @select_v2bf16(<2 x i1> %a, <2 x bfloat> %b, <2 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2bf16: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x bfloat> @select_v2bf16(<2 x i1> %a, <2 x bfloat> %b, <2 x bfloat> %c ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.select.v4bf16(<4 x i1>, <4 x bfloat>, <4 x bfloat>, i32) - define <4 x bfloat> @select_v4bf16(<4 x i1> %a, <4 x bfloat> %b, <4 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4bf16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x bfloat> @select_v4bf16(<4 x i1> %a, <4 x bfloat> %b, <4 x bfloat> %c ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.select.v8bf16(<8 x i1>, <8 x 
bfloat>, <8 x bfloat>, i32) - define <8 x bfloat> @select_v8bf16(<8 x i1> %a, <8 x bfloat> %b, <8 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8bf16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x bfloat> @select_v8bf16(<8 x i1> %a, <8 x bfloat> %b, <8 x bfloat> %c ret <8 x bfloat> %v } -declare <16 x bfloat> @llvm.vp.select.v16bf16(<16 x i1>, <16 x bfloat>, <16 x bfloat>, i32) - define <16 x bfloat> @select_v16bf16(<16 x i1> %a, <16 x bfloat> %b, <16 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll index 93f024c2b77a5..f2f9f90f386c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.vp.select.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v1i1: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.select.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i1: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.select.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext ret <4 x i1> %v } -declare 
<8 x i1> @llvm.vp.select.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i1: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.select.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i1: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zer ret <16 x i1> %v } -declare <8 x i7> @llvm.vp.select.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32) - define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i7: ; CHECK: # %bb.0: @@ -90,8 +78,6 @@ define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.select.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i8: ; CHECK: # %bb.0: @@ -102,8 +88,6 @@ define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.select.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i8: ; CHECK: # %bb.0: @@ -114,8 +98,6 @@ define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.select.v5i8(<5 x i1>, <5 x i8>, <5 x i8>, i32) - define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v5i8: ; CHECK: # %bb.0: @@ -126,8 +108,6 @@ define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext ret <5 x i8> %v } -declare <8 x i8> 
@llvm.vp.select.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i8: ; CHECK: # %bb.0: @@ -138,8 +118,6 @@ define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.select.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i8: ; CHECK: # %bb.0: @@ -150,8 +128,6 @@ define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zer ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.select.v256i8(<256 x i1>, <256 x i8>, <256 x i8>, i32) - define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v256i8: ; CHECK: # %bb.0: @@ -223,8 +199,6 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.select.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i16: ; CHECK: # %bb.0: @@ -235,8 +209,6 @@ define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zero ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.select.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i16: ; CHECK: # %bb.0: @@ -247,8 +219,6 @@ define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zero ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i16: ; CHECK: # %bb.0: @@ -259,8 +229,6 @@ define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 
zero ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.select.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i16: ; CHECK: # %bb.0: @@ -271,8 +239,6 @@ define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.select.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i32: ; CHECK: # %bb.0: @@ -283,8 +249,6 @@ define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zero ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.select.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i32: ; CHECK: # %bb.0: @@ -295,8 +259,6 @@ define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zero ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i32: ; CHECK: # %bb.0: @@ -307,8 +269,6 @@ define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zero ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.select.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i32: ; CHECK: # %bb.0: @@ -319,8 +279,6 @@ define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.select.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i64: ; CHECK: # %bb.0: @@ -331,8 +289,6 @@ define <2 x i64> 
@select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zero ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.select.v4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i64: ; CHECK: # %bb.0: @@ -343,8 +299,6 @@ define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zero ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.select.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i64: ; CHECK: # %bb.0: @@ -355,8 +309,6 @@ define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zero ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.select.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32) - define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i64: ; CHECK: # %bb.0: @@ -367,8 +319,6 @@ define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.select.v32i64(<32 x i1>, <32 x i64>, <32 x i64>, i32) - define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v32i64: ; CHECK: # %bb.0: @@ -458,8 +408,6 @@ define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) ret <32 x i64> %v } -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f16: ; CHECK: # %bb.0: @@ -470,8 +418,6 @@ define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { ; 
CHECK-LABEL: select_v4f16: ; CHECK: # %bb.0: @@ -482,8 +428,6 @@ define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f16: ; CHECK: # %bb.0: @@ -494,8 +438,6 @@ define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f16: ; CHECK: # %bb.0: @@ -506,8 +448,6 @@ define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, ret <16 x half> %v } -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f32: ; CHECK: # %bb.0: @@ -518,8 +458,6 @@ define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f32: ; CHECK: # %bb.0: @@ -530,8 +468,6 @@ define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f32: ; CHECK: # %bb.0: @@ -542,8 +478,6 @@ define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, 
<16 x float>, i32) - define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f32: ; CHECK: # %bb.0: @@ -554,8 +488,6 @@ define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> % ret <16 x float> %v } -declare <64 x float> @llvm.vp.select.v64f32(<64 x i1>, <64 x float>, <64 x float>, i32) - define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v64f32: ; CHECK: # %bb.0: @@ -600,8 +532,6 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> % ret <64 x float> %v } -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f64: ; CHECK: # %bb.0: @@ -612,8 +542,6 @@ define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, ret <2 x double> %v } -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f64: ; CHECK: # %bb.0: @@ -624,8 +552,6 @@ define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, ret <4 x double> %v } -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f64: ; CHECK: # %bb.0: @@ -636,8 +562,6 @@ define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, ret <8 x double> %v } -declare <16 x double> @llvm.vp.select.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32) - define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll index 16a0fddfa9827..7730d6e5e1312 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.shl.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.shl.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsll_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <3 x i8> @llvm.vp.shl.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v3i8: ; CHECK: # %bb.0: @@ -96,8 +90,6 @@ define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroex ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.shl.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i8: ; CHECK: # %bb.0: @@ -162,8 +154,6 @@ define <4 x i8> @vsll_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.shl.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsll_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i8: ; CHECK: # %bb.0: @@ -228,8 +218,6 @@ define <8 x i8> @vsll_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { 
ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.shl.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsll_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i8: ; CHECK: # %bb.0: @@ -294,8 +282,6 @@ define <16 x i8> @vsll_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.shl.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsll_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i16: ; CHECK: # %bb.0: @@ -360,8 +346,6 @@ define <2 x i16> @vsll_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.shl.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsll_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i16: ; CHECK: # %bb.0: @@ -426,8 +410,6 @@ define <4 x i16> @vsll_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.shl.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsll_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i16: ; CHECK: # %bb.0: @@ -492,8 +474,6 @@ define <8 x i16> @vsll_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.shl.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsll_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i16: ; CHECK: # %bb.0: @@ -558,8 +538,6 @@ define <16 x i16> @vsll_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.shl.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsll_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i32: ; CHECK: # %bb.0: @@ -624,8 +602,6 @@ define <2 x i32> @vsll_vi_v2i32_unmasked(<2 x i32> %va, 
i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsll_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i32: ; CHECK: # %bb.0: @@ -690,8 +666,6 @@ define <4 x i32> @vsll_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.shl.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsll_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i32: ; CHECK: # %bb.0: @@ -756,8 +730,6 @@ define <8 x i32> @vsll_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.shl.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsll_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i32: ; CHECK: # %bb.0: @@ -822,8 +794,6 @@ define <16 x i32> @vsll_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.shl.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsll_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i64: ; CHECK: # %bb.0: @@ -900,8 +870,6 @@ define <2 x i64> @vsll_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.shl.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsll_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i64: ; CHECK: # %bb.0: @@ -978,8 +946,6 @@ define <4 x i64> @vsll_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.shl.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsll_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i64: ; CHECK: # %bb.0: @@ -1056,8 +1022,6 @@ define <8 x i64> 
@vsll_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.shl.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsll_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll index 180fafa9659b1..1d0c3a6937b54 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i8: ; CHECK: # %bb.0: @@ -86,8 +82,6 @@ define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i8: ; CHECK: # %bb.0: @@ -152,8 +146,6 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v7i8: ; CHECK: # %bb.0: @@ -164,8 +156,6 @@ define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 
zeroex ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i8: ; CHECK: # %bb.0: @@ -230,8 +220,6 @@ define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i8: ; CHECK: # %bb.0: @@ -296,8 +284,6 @@ define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i16: ; CHECK: # %bb.0: @@ -362,8 +348,6 @@ define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i16: ; CHECK: # %bb.0: @@ -428,8 +412,6 @@ define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i16: ; CHECK: # %bb.0: @@ -494,8 +476,6 @@ define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i16: ; CHECK: # %bb.0: @@ -560,8 +540,6 @@ define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, 
i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i32: ; CHECK: # %bb.0: @@ -626,8 +604,6 @@ define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i32: ; CHECK: # %bb.0: @@ -692,8 +668,6 @@ define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i32: ; CHECK: # %bb.0: @@ -758,8 +732,6 @@ define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i32: ; CHECK: # %bb.0: @@ -824,8 +796,6 @@ define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i64: ; CHECK: # %bb.0: @@ -902,8 +872,6 @@ define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i64: ; CHECK: # %bb.0: @@ -980,8 +948,6 @@ define <4 x i64> 
@vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i64: ; CHECK: # %bb.0: @@ -1058,8 +1024,6 @@ define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll index 22f04803eadd7..c8659b6d9739e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i8: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define <2 x i8> @vsrl_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsrl_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i8: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 
zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v7i8: ; CHECK: # %bb.0: @@ -163,8 +155,6 @@ define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroex ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i8: ; CHECK: # %bb.0: @@ -229,8 +219,6 @@ define <8 x i8> @vsrl_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsrl_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i8: ; CHECK: # %bb.0: @@ -295,8 +283,6 @@ define <16 x i8> @vsrl_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsrl_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i16: ; CHECK: # %bb.0: @@ -361,8 +347,6 @@ define <2 x i16> @vsrl_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsrl_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i16: ; CHECK: # %bb.0: @@ -427,8 +411,6 @@ define <4 x i16> @vsrl_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsrl_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i16: ; CHECK: # %bb.0: @@ -493,8 +475,6 @@ define <8 x i16> @vsrl_vi_v8i16_unmasked(<8 x i16> %va, i32 
zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsrl_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i16: ; CHECK: # %bb.0: @@ -559,8 +539,6 @@ define <16 x i16> @vsrl_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsrl_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i32: ; CHECK: # %bb.0: @@ -625,8 +603,6 @@ define <2 x i32> @vsrl_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsrl_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i32: ; CHECK: # %bb.0: @@ -691,8 +667,6 @@ define <4 x i32> @vsrl_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsrl_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i32: ; CHECK: # %bb.0: @@ -757,8 +731,6 @@ define <8 x i32> @vsrl_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsrl_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i32: ; CHECK: # %bb.0: @@ -823,8 +795,6 @@ define <16 x i32> @vsrl_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsrl_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i64: ; CHECK: # %bb.0: @@ -901,8 +871,6 @@ define <2 x i64> 
@vsrl_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsrl_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i64: ; CHECK: # %bb.0: @@ -979,8 +947,6 @@ define <4 x i64> @vsrl_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsrl_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i64: ; CHECK: # %bb.0: @@ -1057,8 +1023,6 @@ define <8 x i64> @vsrl_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsrl_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll index 79856de033060..4c7d312e8e785 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.ssub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vssub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i7: ; CHECK: # %bb.0: @@ -25,8 +23,6 @@ define <8 x i7> @vssub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vssub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i8: ; CHECK: # %bb.0: @@ -93,8 +89,6 @@ define <2 x i8> 
@vssub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vssub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i8: ; CHECK: # %bb.0: @@ -175,8 +169,6 @@ define <4 x i8> @vssub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vssub_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v5i8: ; CHECK: # %bb.0: @@ -243,8 +235,6 @@ define <5 x i8> @vssub_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vssub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i8: ; CHECK: # %bb.0: @@ -311,8 +301,6 @@ define <8 x i8> @vssub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vssub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i8: ; CHECK: # %bb.0: @@ -379,8 +367,6 @@ define <16 x i8> @vssub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_v258i8: ; CHECK: # %bb.0: @@ -462,8 +448,6 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vssub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i16: ; CHECK: # %bb.0: @@ -530,8 +514,6 @@ define 
<2 x i16> @vssub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vssub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i16: ; CHECK: # %bb.0: @@ -598,8 +580,6 @@ define <4 x i16> @vssub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vssub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i16: ; CHECK: # %bb.0: @@ -666,8 +646,6 @@ define <8 x i16> @vssub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vssub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i16: ; CHECK: # %bb.0: @@ -734,8 +712,6 @@ define <16 x i16> @vssub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vssub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i32: ; CHECK: # %bb.0: @@ -802,8 +778,6 @@ define <2 x i32> @vssub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vssub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i32: ; CHECK: # %bb.0: @@ -870,8 +844,6 @@ define <4 x i32> @vssub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vssub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vssub_vv_v8i32: ; CHECK: # %bb.0: @@ -938,8 +910,6 @@ define <8 x i32> @vssub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vssub_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i32: ; CHECK: # %bb.0: @@ -1006,8 +976,6 @@ define <16 x i32> @vssub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vssub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i64: ; CHECK: # %bb.0: @@ -1104,8 +1072,6 @@ define <2 x i64> @vssub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vssub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i64: ; CHECK: # %bb.0: @@ -1202,8 +1168,6 @@ define <4 x i64> @vssub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vssub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i64: ; CHECK: # %bb.0: @@ -1300,8 +1264,6 @@ define <8 x i64> @vssub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vssub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i64: ; CHECK: # %bb.0: @@ -1400,8 +1362,6 @@ define <16 x i64> @vssub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll index b64e5c4d3467f..392c20756e185 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @ssub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: ssub_v2i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @ssub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: ssub_v4i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @ssub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: ssub_v8i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @ssub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: ssub_v16i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @ssub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: ssub_v2i16_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> @ssub_v4i16_vv(<4 
x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: ssub_v4i16_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @ssub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: ssub_v8i16_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @ssub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: ssub_v16i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @ssub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: ssub_v2i32_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @ssub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: ssub_v4i32_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @ssub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: ssub_v8i32_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @ssub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: ssub_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @ssub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: ssub_v2i64_vv: ; CHECK: # %bb.0: @@ -473,8 +447,6 @@ define <2 x i64> @ssub_v2i64_vi(<2 x 
i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @ssub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: ssub_v4i64_vv: ; CHECK: # %bb.0: @@ -522,8 +494,6 @@ define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @ssub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: ssub_v8i64_vv: ; CHECK: # %bb.0: @@ -571,8 +541,6 @@ define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @ssub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: ssub_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll index 7a9bef49c994d..f9000a1b88a6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.usub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vssubu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vssubu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zero ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vssubu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i8: ; CHECK: # %bb.0: @@ -88,8 +84,6 @@ define <2 x i8> @vssubu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vssubu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { 
; CHECK-LABEL: vssubu_vv_v4i8: ; CHECK: # %bb.0: @@ -170,8 +164,6 @@ define <4 x i8> @vssubu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vssubu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v5i8: ; CHECK: # %bb.0: @@ -238,8 +230,6 @@ define <5 x i8> @vssubu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vssubu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i8: ; CHECK: # %bb.0: @@ -306,8 +296,6 @@ define <8 x i8> @vssubu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vssubu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i8: ; CHECK: # %bb.0: @@ -374,8 +362,6 @@ define <16 x i8> @vssubu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_v258i8: ; CHECK: # %bb.0: @@ -457,8 +443,6 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vssubu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i16: ; CHECK: # %bb.0: @@ -525,8 +509,6 @@ define <2 x i16> @vssubu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vssubu_vv_v4i16(<4 x i16> %va, <4 x 
i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i16: ; CHECK: # %bb.0: @@ -593,8 +575,6 @@ define <4 x i16> @vssubu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vssubu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i16: ; CHECK: # %bb.0: @@ -661,8 +641,6 @@ define <8 x i16> @vssubu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vssubu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i16: ; CHECK: # %bb.0: @@ -729,8 +707,6 @@ define <16 x i16> @vssubu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vssubu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i32: ; CHECK: # %bb.0: @@ -797,8 +773,6 @@ define <2 x i32> @vssubu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vssubu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i32: ; CHECK: # %bb.0: @@ -865,8 +839,6 @@ define <4 x i32> @vssubu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vssubu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i32: ; CHECK: # %bb.0: @@ -933,8 +905,6 @@ define <8 x i32> @vssubu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x 
i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vssubu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i32: ; CHECK: # %bb.0: @@ -1001,8 +971,6 @@ define <16 x i32> @vssubu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vssubu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i64: ; CHECK: # %bb.0: @@ -1099,8 +1067,6 @@ define <2 x i64> @vssubu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vssubu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i64: ; CHECK: # %bb.0: @@ -1197,8 +1163,6 @@ define <4 x i64> @vssubu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vssubu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i64: ; CHECK: # %bb.0: @@ -1295,8 +1259,6 @@ define <8 x i64> @vssubu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vssubu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i64: ; CHECK: # %bb.0: @@ -1395,8 +1357,6 @@ define <16 x i64> @vssubu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll index 26a8879bfdf9f..65a21d8e14366 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @usub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: usub_v2i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @usub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: usub_v4i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @usub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: usub_v8i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @usub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: usub_v16i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @usub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: usub_v2i16_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> 
@usub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: usub_v4i16_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @usub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: usub_v8i16_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @usub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: usub_v16i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @usub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: usub_v2i32_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @usub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: usub_v4i32_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @usub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: usub_v8i32_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @usub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: usub_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @usub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: usub_v2i64_vv: ; CHECK: # %bb.0: @@ -473,8 +447,6 @@ define <2 x i64> 
@usub_v2i64_vi(<2 x i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @usub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: usub_v4i64_vv: ; CHECK: # %bb.0: @@ -522,8 +494,6 @@ define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @usub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: usub_v8i64_vv: ; CHECK: # %bb.0: @@ -571,8 +541,6 @@ define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @usub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: usub_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll index 7f3bbc3dacde3..87ec263e8262b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.sub.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.sub.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.sub.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vsub_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.sub.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> @llvm.vp.sub.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.sub.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vsub_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll index 5c57aa139f065..e5bfd4d6c688e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sub.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsub_vv_v2i8: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <3 x i8> @llvm.vp.sub.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v3i8: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) { ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i8: ; CHECK: # %bb.0: @@ -154,8 +146,6 @@ define <4 x i8> @vsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.sub.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i8: ; CHECK: # %bb.0: @@ -200,8 +190,6 @@ define <8 x i8> @vsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sub.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i8: ; CHECK: # %bb.0: @@ -246,8 +234,6 @@ define <16 x i8> @vsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sub.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i16: ; CHECK: # %bb.0: @@ -292,8 +278,6 @@ define <2 x i16> @vsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sub.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsub_vv_v4i16: ; CHECK: # %bb.0: @@ -338,8 +322,6 @@ define <4 x i16> @vsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sub.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i16: ; CHECK: # %bb.0: @@ -384,8 +366,6 @@ define <8 x i16> @vsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sub.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i16: ; CHECK: # %bb.0: @@ -430,8 +410,6 @@ define <16 x i16> @vsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sub.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i32: ; CHECK: # %bb.0: @@ -476,8 +454,6 @@ define <2 x i32> @vsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i32: ; CHECK: # %bb.0: @@ -522,8 +498,6 @@ define <4 x i32> @vsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i32: ; CHECK: # %bb.0: @@ -568,8 +542,6 @@ define <8 x i32> @vsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sub.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsub_vv_v16i32(<16 x i32> %va, <16 x 
i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i32: ; CHECK: # %bb.0: @@ -614,8 +586,6 @@ define <16 x i32> @vsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sub.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i64: ; CHECK: # %bb.0: @@ -690,8 +660,6 @@ define <2 x i64> @vsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sub.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i64: ; CHECK: # %bb.0: @@ -766,8 +734,6 @@ define <4 x i64> @vsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sub.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i64: ; CHECK: # %bb.0: @@ -842,8 +808,6 @@ define <8 x i64> @vsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sub.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll index 97b86a8eff19f..2cb344434eec8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.xor.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> 
%b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.xor.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i8: ; CHECK: # %bb.0: @@ -114,8 +110,6 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.xor.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i8: ; CHECK: # %bb.0: @@ -200,8 +194,6 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.xor.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i8: ; CHECK: # %bb.0: @@ -286,8 +278,6 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32) - define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v9i8: ; CHECK: # %bb.0: @@ -372,8 +362,6 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) { ret <9 x i8> %v } -declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i8: ; CHECK: # %bb.0: @@ -458,8 +446,6 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.xor.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vxor_vv_v2i16: ; CHECK: # %bb.0: @@ -544,8 +530,6 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.xor.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i16: ; CHECK: # %bb.0: @@ -630,8 +614,6 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.xor.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i16: ; CHECK: # %bb.0: @@ -716,8 +698,6 @@ define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.xor.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i16: ; CHECK: # %bb.0: @@ -802,8 +782,6 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.xor.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i32: ; CHECK: # %bb.0: @@ -888,8 +866,6 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i32: ; CHECK: # %bb.0: @@ -974,8 +950,6 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.xor.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, 
<8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i32: ; CHECK: # %bb.0: @@ -1060,8 +1034,6 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.xor.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i32: ; CHECK: # %bb.0: @@ -1146,8 +1118,6 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.xor.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i64: ; CHECK: # %bb.0: @@ -1262,8 +1232,6 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.xor.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i64: ; CHECK: # %bb.0: @@ -1378,8 +1346,6 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.xor.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i64: ; CHECK: # %bb.0: @@ -1494,8 +1460,6 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.xor.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll index 88803f7cd5d89..df6dce28df244 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf4: ; CHECK: # 
%bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } 
-declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> 
@llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> 
@llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> 
@llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf8: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf4: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf2: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m1: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m2: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m4: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m8: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf4: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf2: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m1: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m4: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m8: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32mf2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -665,8 +565,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m2: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m4: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m8: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, 
iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ 
entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void 
@test_sf_vc_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret <16 x i8> %0 
} -declare <16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 
x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: 
# %entry @@ -1250,8 +1060,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m1: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m2: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i8> 
@test_sf_vc_v_i_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m8: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> 
@llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x half> @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x half> @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x half> @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x half> @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x half> @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x half> @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ 
entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x float> @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x float> @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x float> @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x float> @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x float> @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <1 x half> @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x half> @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> 
@llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x half> @test_sf_vc_fv_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x half> @test_sf_vc_fv_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x half> @test_sf_vc_fv_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x half> @test_sf_vc_fv_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x float> @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x float> @test_sf_vc_fv_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x float> @test_sf_vc_fv_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x float> 
@test_sf_vc_fv_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x float> @test_sf_vc_fv_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1848,4 +1566,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll index b553a62ae496a..1ec74e3452c57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void 
} -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, <64 x i8>, <64 x i8>, iXLen) - define void @test_sf_vc_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, <16 x i32>, <16 x i32>, iXLen) - 
define void @test_sf_vc_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vv_se_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <4 x i8> %0 } -declare 
<4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> 
@test_sf_vc_v_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <8 x i64> %0 } -declare <8 x 
i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vv_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vv_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vv_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vv_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vv_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vv_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vv_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m8: 
; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vv_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vv_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vv_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vv_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vv_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vv_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x 
i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vv_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vv_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vv_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vv_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vv_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vv_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vv_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m2: ; CHECK: # 
%bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vv_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vv_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_xv_se_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <64 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, <32 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, <2 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, <4 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, <8 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xv_se_e8mf8(<1 x i8> 
%vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> 
@llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xv_se_e32mf2(<1 x i32> %vs2, 
i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xv_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xv_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> 
@llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xv_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xv_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xv_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xv_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xv_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xv_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xv_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 
+1225,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xv_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xv_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xv_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xv_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xv_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xv_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xv_e32m2(<4 x i32> %vs2, i32 
signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xv_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xv_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen) - define void @test_sf_vc_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <8 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <16 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <64 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ 
-1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, <1 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, <2 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, <4 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> 
@llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 
@@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_iv_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_iv_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_iv_e8mf2(<4 x i8> %vs2, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_iv_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_iv_e8m2(<16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_iv_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_iv_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_iv_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_iv_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_iv_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_iv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_iv_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_iv_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_iv_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_iv_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_iv_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_iv_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_iv_e32m4(<8 x i32> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_iv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_iv_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_iv_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_iv_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_iv_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_iv_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen) - define void @test_sf_vc_fvv_se_e16mf4(<1 x half> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, <1 x half>, <1 x i16>, iXLen) - define <1 x half> @test_sf_vc_v_fvv_se_e16mf4(<1 x half> 
%vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, <2 x half>, <2 x i16>, iXLen) - define <2 x half> @test_sf_vc_v_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, <4 x half>, <4 x i16>, iXLen) - define <4 x half> @test_sf_vc_v_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, <8 x half>, <8 x i16>, iXLen) - define <8 x half> @test_sf_vc_v_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 
@@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, <16 x half>, <16 x i16>, iXLen) - define <16 x half> @test_sf_vc_v_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, <32 x half>, <32 x i16>, iXLen) - define <32 x half> @test_sf_vc_v_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, <1 x float>, <1 x i32>, iXLen) - define <1 x float> @test_sf_vc_v_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> 
@llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, <2 x float>, <2 x i32>, iXLen) - define <2 x float> @test_sf_vc_v_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, <4 x float>, <4 x i32>, iXLen) - define <4 x float> @test_sf_vc_v_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, <8 x float>, <8 x i32>, iXLen) - define <8 x float> @test_sf_vc_v_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen) - define void 
@test_sf_vc_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, <16 x float>, <16 x i32>, iXLen) - define <16 x float> @test_sf_vc_v_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, <1 x double>, <1 x i64>, iXLen) - define <1 x double> @test_sf_vc_v_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <1 x double>, <1 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, <2 x double>, <2 x i64>, iXLen) - define <2 x double> @test_sf_vc_v_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <2 x double>, <2 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, <4 x double>, <4 x i64>, iXLen) - define <4 x double> @test_sf_vc_v_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <4 x double>, <4 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, <8 x double>, <8 x i64>, iXLen) - define <8 x double> @test_sf_vc_v_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <8 x double>, <8 x i64>, iXLen) - define void @test_sf_vc_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, <1 x half>, i16, iXLen) - define <1 x half> @test_sf_vc_v_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, <1 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, <2 x half>, i16, iXLen) - define <2 x half> @test_sf_vc_v_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, <2 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, <4 x half>, i16, iXLen) - define <4 x half> @test_sf_vc_v_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, <4 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, <8 x half>, i16, iXLen) - define <8 x half> @test_sf_vc_v_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, <8 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, <16 x half>, i16, iXLen) - define <16 x half> @test_sf_vc_v_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, <16 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, <32 x half>, i16, iXLen) - define <32 x half> @test_sf_vc_v_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, <32 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, <1 x float>, i32, iXLen) - define <1 x float> @test_sf_vc_v_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, <1 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, <2 x float>, i32, iXLen) - define <2 x float> @test_sf_vc_v_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> 
@llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, <2 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, <4 x float>, i32, iXLen) - define <4 x float> @test_sf_vc_v_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, <4 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, <8 x float>, i32, iXLen) - define <8 x float> @test_sf_vc_v_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, <8 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, <16 x float>, i32, iXLen) - define <16 x float> @test_sf_vc_v_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, <16 x float>, i32, iXLen) - define void @test_sf_vc_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, <1 x half>, iXLen, iXLen) - define <1 x half> @test_sf_vc_v_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, <1 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, <2 x half>, iXLen, iXLen) - define <2 x half> @test_sf_vc_v_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, <2 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, <4 x half>, iXLen, iXLen) - define <4 x half> @test_sf_vc_v_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, <4 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, <8 x half>, iXLen, iXLen) - 
define <8 x half> @test_sf_vc_v_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, <8 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, <16 x half>, iXLen, iXLen) - define <16 x half> @test_sf_vc_v_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, <16 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, <32 x half>, iXLen, iXLen) - define <32 x half> @test_sf_vc_v_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, <32 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, <1 x float>, iXLen, iXLen) - define <1 x float> @test_sf_vc_v_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret <1 x float> %0 } 
-declare <1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, <1 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, <2 x float>, iXLen, iXLen) - define <2 x float> @test_sf_vc_v_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, <2 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, <4 x float>, iXLen, iXLen) - define <4 x float> @test_sf_vc_v_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, <4 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, <8 x float>, iXLen, iXLen) - define <8 x float> @test_sf_vc_v_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, <8 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, <16 x float>, iXLen, iXLen) - define <16 x float> @test_sf_vc_v_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, <16 x float>, iXLen, iXLen) - define void @test_sf_vc_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, <1 x half>, half, iXLen) - define <1 x half> @test_sf_vc_v_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, <1 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, <2 x half>, half, iXLen) - define <2 x half> @test_sf_vc_v_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, <2 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, <4 x half>, half, iXLen) - define <4 x half> @test_sf_vc_v_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, <4 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, <8 x half>, half, iXLen) - define <8 x half> @test_sf_vc_v_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, <8 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, <16 x half>, half, iXLen) - define <16 x half> @test_sf_vc_v_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, <16 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m8(<32 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, <32 x half>, half, iXLen) - define <32 x half> @test_sf_vc_v_fvf_se_e16m8(<32 x half> %vs2, half 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, <32 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, <1 x float>, float, iXLen) - define <1 x float> @test_sf_vc_v_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, <1 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, <2 x float>, float, iXLen) - define <2 x float> @test_sf_vc_v_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, <2 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, <4 x float>, float, iXLen) - define <4 x float> @test_sf_vc_v_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret <4 x float> %0 } 
-declare <4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, <4 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, <8 x float>, float, iXLen) - define <8 x float> @test_sf_vc_v_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, <8 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, <16 x float>, float, iXLen) - define <16 x float> @test_sf_vc_v_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, <16 x float>, float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll index e44ff31406f4a..29b101eb754c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare 
void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define void 
@test_sf_vc_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ 
-275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <16 x 
i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2: ; CHECK: # 
%bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x 
i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> 
@test_sf_vc_v_vvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vvv_e8m8(<64 x i8> %vd, <64 x i8> 
%vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vvv_e16m8(<32 x i16> 
%vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> 
@test_sf_vc_v_vvv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vvv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m2: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vvv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vvv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define void 
@test_sf_vc_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> 
%vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xvv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1: ; CHECK: # %bb.0: # 
%entry @@ -1458,8 +1236,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 
+1302,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define void @test_sf_vc_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define void 
@test_sf_vc_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, 
<2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<2 x i16> %vd, <2 
x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ 
-2121,8 +1797,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_ivv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_ivv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_ivv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_ivv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_ivv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, 
iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_ivv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_ivv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret <16 x i32> %0 } 
-declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define void @test_sf_vc_fvvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) - define <1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> 
@llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) - define <2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) - define <4 x half> @test_sf_vc_fv_fvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) - define <8 x half> @test_sf_vc_fv_fvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ 
-2524,8 +2138,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) - define <16 x half> @test_sf_vc_fv_fvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) - define <32 x half> @test_sf_vc_fv_fvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) - define <1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<1 x float> %vd, <1 x i32> 
%vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) - define <2 x float> @test_sf_vc_fv_fvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) - define <4 x float> @test_sf_vc_fv_fvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, 
iXLen) - define <8 x float> @test_sf_vc_fv_fvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) - define <16 x float> @test_sf_vc_fv_fvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) - define <1 x double> @test_sf_vc_fv_fvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare 
void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) - define <2 x double> @test_sf_vc_fv_fvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) - define <4 x double> @test_sf_vc_fv_fvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) - define <8 x double> @test_sf_vc_fv_fvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) - define <1 x half> @test_sf_vc_v_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) - define <2 x half> @test_sf_vc_v_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) - define <4 x half> @test_sf_vc_v_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) - define <8 x half> @test_sf_vc_v_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) - define <16 x half> @test_sf_vc_v_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) - define <32 x half> @test_sf_vc_v_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 
{ ; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) - define <1 x float> @test_sf_vc_v_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) - define <2 x float> @test_sf_vc_v_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) - define <4 x float> @test_sf_vc_v_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) - define <8 x float> @test_sf_vc_v_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) - define <16 x float> @test_sf_vc_v_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) - define void @test_sf_vc_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) - define <1 x half> @test_sf_vc_fv_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) - define <2 x half> @test_sf_vc_fv_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) - define <4 x half> @test_sf_vc_fv_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) - define <8 x half> @test_sf_vc_fv_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) - define <16 x half> @test_sf_vc_fv_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) - define <32 x half> @test_sf_vc_fv_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) - define <1 x float> @test_sf_vc_fv_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) - define <2 x float> @test_sf_vc_fv_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen) - define <4 x float> @test_sf_vc_fv_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen) - define <8 x float> @test_sf_vc_fv_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen) - define <16 x float> @test_sf_vc_fv_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half, iXLen) - define <1 x half> @test_sf_vc_fv_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half, iXLen) - define <2 x half> @test_sf_vc_fv_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half, iXLen) - define <4 x half> @test_sf_vc_fv_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half, iXLen) - define <8 x half> @test_sf_vc_fv_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half, iXLen) - define <16 x half> @test_sf_vc_fv_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m8(<32 
x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half, iXLen) - define <32 x half> @test_sf_vc_fv_fvvf_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float, iXLen) - define <1 x float> @test_sf_vc_fv_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, float, iXLen) - define <2 x float> @test_sf_vc_fv_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, 
float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float, iXLen) - define <4 x float> @test_sf_vc_fv_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float, iXLen) - define <8 x float> @test_sf_vc_fv_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float, iXLen) - define <16 x float> @test_sf_vc_fv_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> 
@llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float %rs1, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll index ea6b936843c2f..09f770dfcfed8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m4: ; CHECK: # %bb.0: # 
%entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ 
entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define <1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ 
entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i16> @test_sf_vc_v_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i16> @test_sf_vc_v_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i16> @test_sf_vc_v_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define <1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i32> @test_sf_vc_v_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i32> @test_sf_vc_v_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i32> @test_sf_vc_v_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define <1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i64> @test_sf_vc_v_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i64> @test_sf_vc_v_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x 
i32>, <4 x i32>, iXLen) - define <8 x i64> @test_sf_vc_v_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define <1 x i16> @test_sf_vc_v_vvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i16> @test_sf_vc_v_vvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i16> @test_sf_vc_v_vvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i16> @test_sf_vc_v_vvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i16> @test_sf_vc_v_vvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> 
@llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i16> @test_sf_vc_v_vvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define <1 x i32> @test_sf_vc_v_vvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i32> @test_sf_vc_v_vvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i32> @test_sf_vc_v_vvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i32> @test_sf_vc_v_vvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i32> @test_sf_vc_v_vvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: ; CHECK: # %bb.0: # %entry @@ 
-535,8 +455,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define <1 x i64> @test_sf_vc_v_vvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i64> @test_sf_vc_v_vvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i64> @test_sf_vc_v_vvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i64> @test_sf_vc_v_vvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: ; 
CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x 
i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define <1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define <2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define <4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define <8 x i16> @test_sf_vc_v_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define <16 x i16> @test_sf_vc_v_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, 
<16 x i8>, i8, iXLen) - define <32 x i16> @test_sf_vc_v_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define <1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define <2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define <4 x i32> @test_sf_vc_v_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define <8 x i32> @test_sf_vc_v_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define <16 x i32> @test_sf_vc_v_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> 
@llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define <1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define <2 x i64> @test_sf_vc_v_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define <4 x i64> @test_sf_vc_v_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) - define <8 x i64> @test_sf_vc_v_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define <1 x i16> @test_sf_vc_v_xvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define <2 x i16> @test_sf_vc_v_xvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: 
ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define <4 x i16> @test_sf_vc_v_xvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define <8 x i16> @test_sf_vc_v_xvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define <16 x i16> @test_sf_vc_v_xvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) - define <32 x i16> @test_sf_vc_v_xvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define <1 x i32> @test_sf_vc_v_xvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define <2 x i32> @test_sf_vc_v_xvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret <2 
x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define <4 x i32> @test_sf_vc_v_xvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define <8 x i32> @test_sf_vc_v_xvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define <16 x i32> @test_sf_vc_v_xvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define <1 x i64> @test_sf_vc_v_xvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define <2 x i64> @test_sf_vc_v_xvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define <4 x i64> @test_sf_vc_v_xvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 
@@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) - define <8 x i64> @test_sf_vc_v_xvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - 
define void @test_sf_vc_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ 
-1328,8 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> 
@llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, 
<32 x i8>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define <2 x i64> 
@test_sf_vc_v_ivw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) - define <1 x float> @test_sf_vc_fw_fwvvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) - define <2 x float> 
@test_sf_vc_fw_fwvvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) - define <4 x float> @test_sf_vc_fw_fwvvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) - define <8 x float> @test_sf_vc_fw_fwvvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) - define <16 x float> @test_sf_vc_fw_fwvvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) - define <1 x double> @test_sf_vc_fw_fwvvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) - define <2 x double> @test_sf_vc_fw_fwvvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) - define <4 x double> @test_sf_vc_fw_fwvvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) - define <8 x double> @test_sf_vc_fw_fwvvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) - define <1 x float> @test_sf_vc_w_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) - define void 
@test_sf_vc_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) - define <2 x float> @test_sf_vc_w_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) - define <4 x float> @test_sf_vc_w_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) - define <8 x float> @test_sf_vc_w_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) - define 
void @test_sf_vc_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) - define <16 x float> @test_sf_vc_w_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) - define <1 x double> @test_sf_vc_w_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen) - define <2 x double> @test_sf_vc_w_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x 
double>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) - define <4 x double> @test_sf_vc_w_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) - define <8 x double> @test_sf_vc_w_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) - define <1 x float> @test_sf_vc_fw_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> 
@llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) - define <2 x float> @test_sf_vc_fw_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) - define <4 x float> @test_sf_vc_fw_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) - define <8 x float> @test_sf_vc_fw_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> 
@llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) - define <16 x float> @test_sf_vc_fw_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) - define <1 x double> @test_sf_vc_fw_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) - define <2 x double> @test_sf_vc_fw_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <2 x double> %0 } -declare <2 x 
double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen) - define <4 x double> @test_sf_vc_fw_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen) - define <8 x double> @test_sf_vc_fw_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen) - define <1 x float> @test_sf_vc_fw_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret <1 x float> %0 } 
-declare <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen) - define <2 x float> @test_sf_vc_fw_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen) - define <4 x float> @test_sf_vc_fw_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen) - define <8 x float> @test_sf_vc_fw_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ 
entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) - define <16 x float> @test_sf_vc_fw_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen) - define <1 x double> @test_sf_vc_fw_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) - define <2 x double> @test_sf_vc_fw_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) - define <4 x double> @test_sf_vc_fw_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen) - define <8 x double> @test_sf_vc_fw_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2693,4 +2281,3 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll index d292978c1d5eb..10f0f7cd7ae83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc 
-mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.zext.v4i16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i1: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vzext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i1: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vzext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll index 8259336e8668c..e2d9e0ac2deea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.zext.v4i16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i8: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vzext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i8: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vzext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i8(<4 x 
i8>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i8: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define <4 x i64> @vzext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i16: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define <4 x i32> @vzext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i16: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <4 x i64> @vzext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <4 x i64> @vzext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) - define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v32i64_v32i32: ; CHECK: # %bb.0: @@ -202,8 +188,6 @@ define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl ret <32 x i64> %v } -declare <4 x i16> @llvm.vp.zext.v4i16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i7: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define <4 x i16> @vzext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i8> @llvm.vp.zext.v4i8.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x 
i8> @vzext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i8_v4i7: ; CHECK: # %bb.0: @@ -229,8 +211,6 @@ define <4 x i8> @vzext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i15> @llvm.vp.zext.v4i15.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i15> @vzext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i15_v4i8: ; CHECK: # %bb.0: @@ -242,8 +222,6 @@ define <4 x i15> @vzext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i15> %v } -declare <4 x i15> @llvm.vp.zext.v4i15.v4i9(<4 x i9>, <4 x i1>, i32) - define <4 x i15> @vzext_v4i15_v4i9(<4 x i9> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i15_v4i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll index 4512d809995a4..17c2244001082 100644 --- a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f32() - define @trunc_nxv1f32_to_si8( %x) { ; RV32-LABEL: trunc_nxv1f32_to_si8: ; RV32: # %bb.0: @@ -202,8 +200,6 @@ define @trunc_nxv1f32_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f32() - define @trunc_nxv4f32_to_si8( %x) { ; RV32-LABEL: trunc_nxv4f32_to_si8: ; RV32: # %bb.0: @@ -396,8 +392,6 @@ define @trunc_nxv4f32_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f32() - define @ceil_nxv1f32_to_si8( %x) { ; RV32-LABEL: ceil_nxv1f32_to_si8: ; RV32: # %bb.0: @@ -622,8 +616,6 @@ define @ceil_nxv1f32_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f32() - define @ceil_nxv4f32_to_si8( %x) 
{ ; RV32-LABEL: ceil_nxv4f32_to_si8: ; RV32: # %bb.0: @@ -848,8 +840,6 @@ define @ceil_nxv4f32_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f32() - define @rint_nxv4f32_to_si8( %x) { ; RV32-LABEL: rint_nxv4f32_to_si8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll index 8f2aec3140e9d..e2deefa26ecb3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.floor.nxv1bf16(, , i32) - define @vp_floor_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_floor_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv2bf16(, , i32) - define @vp_floor_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_floor_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv4bf16(, , i32) - define @vp_floor_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_floor_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv8bf16(, , i32) - define @vp_floor_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_floor_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv16bf16(, , i32) - define @vp_floor_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_floor_nxv16bf16_unmasked( %v } -declare @llvm.vp.floor.nxv32bf16(, , i32) - define @vp_floor_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_floor_nxv32bf16_unmasked( 
@llvm.vp.floor.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.floor.nxv1f16(, , i32) define @vp_floor_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv1f16: @@ -490,8 +477,6 @@ define @vp_floor_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv2f16(, , i32) - define @vp_floor_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_floor_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv4f16(, , i32) - define @vp_floor_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_floor_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv8f16(, , i32) - define @vp_floor_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_floor_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv16f16(, , i32) - define @vp_floor_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_floor_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.floor.nxv32f16(, , i32) - define @vp_floor_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_floor_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.floor.nxv1f32(, , i32) - define @vp_floor_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_floor_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv2f32(, , i32) - define @vp_floor_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_floor_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv4f32(, , i32) - define @vp_floor_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define 
@vp_floor_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv8f32(, , i32) - define @vp_floor_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_floor_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv16f32(, , i32) - define @vp_floor_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_floor_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.floor.nxv1f64(, , i32) - define @vp_floor_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_floor_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv2f64(, , i32) - define @vp_floor_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_floor_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv4f64(, , i32) - define @vp_floor_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_floor_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv7f64(, , i32) - define @vp_floor_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_floor_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv8f64(, , i32) - define @vp_floor_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_floor_nxv8f64_unmasked( %v } ; Test splitting. 
-declare @llvm.vp.floor.nxv16f64(, , i32) define @vp_floor_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll index 3faaf210086cb..25a4eb74eeba7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll @@ -16,8 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs -early-live-intervals < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.maximum.nxv1bf16(, ) - define @vfmax_nxv1bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1bf16_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @vfmax_nxv1bf16_vv( %a, %v } -declare @llvm.maximum.nxv2bf16(, ) - define @vfmax_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define @vfmax_nxv2bf16_vv( %a, %v } -declare @llvm.maximum.nxv4bf16(, ) - define @vfmax_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfmax_nxv4bf16_vv( %a, %v } -declare @llvm.maximum.nxv8bf16(, ) - define @vfmax_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfmax_nxv8bf16_vv( %a, %v } -declare @llvm.maximum.nxv16bf16(, ) - define @vfmax_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define @vfmax_nxv16bf16_vv( %a, %v } -declare @llvm.maximum.nxv32bf16(, ) - define @vfmax_nxv32bf16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmax_nxv32bf16_vv: ; ZVFH: # %bb.0: @@ -286,8 +274,6 @@ define @vfmax_nxv32bf16_vv( %a, %v } -declare @llvm.maximum.nxv1f16(, ) - define @vfmax_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -319,8 +305,6 @@ define @vfmax_nxv1f16_vv( %a, %v } -declare @llvm.maximum.nxv2f16(, ) - define @vfmax_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv2f16_vv: ; 
ZVFH: # %bb.0: @@ -352,8 +336,6 @@ define @vfmax_nxv2f16_vv( %a, %v } -declare @llvm.maximum.nxv4f16(, ) - define @vfmax_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -385,8 +367,6 @@ define @vfmax_nxv4f16_vv( %a, %v } -declare @llvm.maximum.nxv8f16(, ) - define @vfmax_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -418,8 +398,6 @@ define @vfmax_nxv8f16_vv( %a, %v } -declare @llvm.maximum.nxv16f16(, ) - define @vfmax_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -450,8 +428,6 @@ define @vfmax_nxv16f16_vv( %a, %v } -declare @llvm.maximum.nxv32f16(, ) - define @vfmax_nxv32f16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmax_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -540,8 +516,6 @@ define @vfmax_nxv32f16_vv( %a, %v } -declare @llvm.maximum.nxv1f32(, ) - define @vfmax_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f32_vv: ; CHECK: # %bb.0: @@ -556,8 +530,6 @@ define @vfmax_nxv1f32_vv( %a, %v } -declare @llvm.maximum.nxv2f32(, ) - define @vfmax_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f32_vv: ; CHECK: # %bb.0: @@ -572,8 +544,6 @@ define @vfmax_nxv2f32_vv( %a, %v } -declare @llvm.maximum.nxv4f32(, ) - define @vfmax_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f32_vv: ; CHECK: # %bb.0: @@ -588,8 +558,6 @@ define @vfmax_nxv4f32_vv( %a, %v } -declare @llvm.maximum.nxv8f32(, ) - define @vfmax_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f32_vv: ; CHECK: # %bb.0: @@ -604,8 +572,6 @@ define @vfmax_nxv8f32_vv( %a, %v } -declare @llvm.maximum.nxv16f32(, ) - define @vfmax_nxv16f32_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmax_nxv16f32_vv: ; CHECK: # %bb.0: @@ -621,8 +587,6 @@ define @vfmax_nxv16f32_vv( %a, %v } -declare @llvm.maximum.nxv1f64(, ) - define @vfmax_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f64_vv: ; CHECK: # %bb.0: @@ -637,8 +601,6 @@ define @vfmax_nxv1f64_vv( %a, %v } -declare @llvm.maximum.nxv2f64(, ) - define @vfmax_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f64_vv: ; CHECK: # 
%bb.0: @@ -653,8 +615,6 @@ define @vfmax_nxv2f64_vv( %a, %v } -declare @llvm.maximum.nxv4f64(, ) - define @vfmax_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f64_vv: ; CHECK: # %bb.0: @@ -669,8 +629,6 @@ define @vfmax_nxv4f64_vv( %a, %v } -declare @llvm.maximum.nxv8f64(, ) - define @vfmax_nxv8f64_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmax_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll index f6b94b41103ef..0e0c92b150d33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll @@ -14,8 +14,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.maximum.nxv1bf16(, , , i32) - define @vfmax_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -59,8 +57,6 @@ define @vfmax_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv2bf16(, , , i32) - define @vfmax_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vfmax_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv4bf16(, , , i32) - define @vfmax_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define @vfmax_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv8bf16(, , , i32) - define @vfmax_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -198,8 +190,6 @@ define @vfmax_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv16bf16(, , , i32) - define @vfmax_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -259,8 +249,6 @@ define @vfmax_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.maximum.nxv32bf16(, , , i32) - define @vfmax_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfmax_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -498,7 +486,6 @@ define @vfmax_vv_nxv32bf16_unmasked( @llvm.vp.maximum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.maximum.nxv1f16(, , , i32) define @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv1f16: @@ -566,8 +553,6 @@ define @vfmax_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv2f16(, , , i32) - define @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -634,8 +619,6 @@ define @vfmax_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv4f16(, , , i32) - define @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -704,8 +687,6 @@ define @vfmax_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv8f16(, , , i32) - define @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -776,8 +757,6 @@ define @vfmax_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv16f16(, , , i32) - define @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -862,8 +841,6 @@ define @vfmax_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.maximum.nxv32f16(, , , i32) - define @vfmax_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1143,8 +1120,6 @@ define @vfmax_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.maximum.nxv1f32(, , , i32) - define @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1176,8 +1151,6 @@ define @vfmax_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv2f32(, , , i32) - define @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1209,8 +1182,6 @@ define @vfmax_vv_nxv2f32_unmasked( %va, ret %v } -declare 
@llvm.vp.maximum.nxv4f32(, , , i32) - define @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1244,8 +1215,6 @@ define @vfmax_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv8f32(, , , i32) - define @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1279,8 +1248,6 @@ define @vfmax_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv1f64(, , , i32) - define @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1312,8 +1279,6 @@ define @vfmax_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv2f64(, , , i32) - define @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1347,8 +1312,6 @@ define @vfmax_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv4f64(, , , i32) - define @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1382,8 +1345,6 @@ define @vfmax_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv8f64(, , , i32) - define @vfmax_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1433,8 +1394,6 @@ define @vfmax_vv_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv16f64(, , , i32) - define @vfmax_vv_nxv16f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll index 919d63ca6e31a..6ffa71c6c908b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll @@ -16,8 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs -early-live-intervals < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare 
@llvm.minimum.nxv1bf16(, ) - define @vfmin_nxv1bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1bf16_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @vfmin_nxv1bf16_vv( %a, %v } -declare @llvm.minimum.nxv2bf16(, ) - define @vfmin_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define @vfmin_nxv2bf16_vv( %a, %v } -declare @llvm.minimum.nxv4bf16(, ) - define @vfmin_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfmin_nxv4bf16_vv( %a, %v } -declare @llvm.minimum.nxv8bf16(, ) - define @vfmin_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfmin_nxv8bf16_vv( %a, %v } -declare @llvm.minimum.nxv16bf16(, ) - define @vfmin_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define @vfmin_nxv16bf16_vv( %a, %v } -declare @llvm.minimum.nxv32bf16(, ) - define @vfmin_nxv32bf16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmin_nxv32bf16_vv: ; ZVFH: # %bb.0: @@ -286,8 +274,6 @@ define @vfmin_nxv32bf16_vv( %a, %v } -declare @llvm.minimum.nxv1f16(, ) - define @vfmin_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -319,8 +305,6 @@ define @vfmin_nxv1f16_vv( %a, %v } -declare @llvm.minimum.nxv2f16(, ) - define @vfmin_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -352,8 +336,6 @@ define @vfmin_nxv2f16_vv( %a, %v } -declare @llvm.minimum.nxv4f16(, ) - define @vfmin_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -385,8 +367,6 @@ define @vfmin_nxv4f16_vv( %a, %v } -declare @llvm.minimum.nxv8f16(, ) - define @vfmin_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -418,8 +398,6 @@ define @vfmin_nxv8f16_vv( %a, %v } -declare @llvm.minimum.nxv16f16(, ) - define @vfmin_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -450,8 +428,6 @@ define @vfmin_nxv16f16_vv( %a, %v } -declare 
@llvm.minimum.nxv32f16(, ) - define @vfmin_nxv32f16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmin_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -540,8 +516,6 @@ define @vfmin_nxv32f16_vv( %a, %v } -declare @llvm.minimum.nxv1f32(, ) - define @vfmin_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f32_vv: ; CHECK: # %bb.0: @@ -556,8 +530,6 @@ define @vfmin_nxv1f32_vv( %a, %v } -declare @llvm.minimum.nxv2f32(, ) - define @vfmin_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f32_vv: ; CHECK: # %bb.0: @@ -572,8 +544,6 @@ define @vfmin_nxv2f32_vv( %a, %v } -declare @llvm.minimum.nxv4f32(, ) - define @vfmin_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f32_vv: ; CHECK: # %bb.0: @@ -588,8 +558,6 @@ define @vfmin_nxv4f32_vv( %a, %v } -declare @llvm.minimum.nxv8f32(, ) - define @vfmin_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f32_vv: ; CHECK: # %bb.0: @@ -604,8 +572,6 @@ define @vfmin_nxv8f32_vv( %a, %v } -declare @llvm.minimum.nxv16f32(, ) - define @vfmin_nxv16f32_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmin_nxv16f32_vv: ; CHECK: # %bb.0: @@ -621,8 +587,6 @@ define @vfmin_nxv16f32_vv( %a, %v } -declare @llvm.minimum.nxv1f64(, ) - define @vfmin_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f64_vv: ; CHECK: # %bb.0: @@ -637,8 +601,6 @@ define @vfmin_nxv1f64_vv( %a, %v } -declare @llvm.minimum.nxv2f64(, ) - define @vfmin_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f64_vv: ; CHECK: # %bb.0: @@ -653,8 +615,6 @@ define @vfmin_nxv2f64_vv( %a, %v } -declare @llvm.minimum.nxv4f64(, ) - define @vfmin_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f64_vv: ; CHECK: # %bb.0: @@ -669,8 +629,6 @@ define @vfmin_nxv4f64_vv( %a, %v } -declare @llvm.minimum.nxv8f64(, ) - define @vfmin_nxv8f64_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmin_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll index dc2dec55c4a1a..86ed239e99373 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll 
@@ -14,8 +14,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.minimum.nxv1bf16(, , , i32) - define @vfmin_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -59,8 +57,6 @@ define @vfmin_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv2bf16(, , , i32) - define @vfmin_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vfmin_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv4bf16(, , , i32) - define @vfmin_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define @vfmin_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv8bf16(, , , i32) - define @vfmin_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -198,8 +190,6 @@ define @vfmin_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv16bf16(, , , i32) - define @vfmin_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -259,8 +249,6 @@ define @vfmin_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.minimum.nxv32bf16(, , , i32) - define @vfmin_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -498,7 +486,6 @@ define @vfmin_vv_nxv32bf16_unmasked( @llvm.vp.minimum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.minimum.nxv1f16(, , , i32) define @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv1f16: @@ -566,8 +553,6 @@ define @vfmin_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv2f16(, , , i32) - define @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -634,8 +619,6 @@ define @vfmin_vv_nxv2f16_unmasked( %va, < ret %v } -declare 
@llvm.vp.minimum.nxv4f16(, , , i32) - define @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -704,8 +687,6 @@ define @vfmin_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv8f16(, , , i32) - define @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -776,8 +757,6 @@ define @vfmin_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv16f16(, , , i32) - define @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -862,8 +841,6 @@ define @vfmin_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.minimum.nxv32f16(, , , i32) - define @vfmin_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1143,8 +1120,6 @@ define @vfmin_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.minimum.nxv1f32(, , , i32) - define @vfmin_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1176,8 +1151,6 @@ define @vfmin_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv2f32(, , , i32) - define @vfmin_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1209,8 +1182,6 @@ define @vfmin_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv4f32(, , , i32) - define @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1244,8 +1215,6 @@ define @vfmin_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv8f32(, , , i32) - define @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1279,8 +1248,6 @@ define @vfmin_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv1f64(, , , i32) - define @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1312,8 +1279,6 @@ 
define @vfmin_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv2f64(, , , i32) - define @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1347,8 +1312,6 @@ define @vfmin_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv4f64(, , , i32) - define @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1382,8 +1345,6 @@ define @vfmin_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv8f64(, , , i32) - define @vfmin_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1433,8 +1394,6 @@ define @vfmin_vv_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv16f64(, , , i32) - define @vfmin_vv_nxv16f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll index 409235f7e1b2c..6c5b6ff31a24b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare @llvm.experimental.constrained.nearbyint.nxv1f16(, metadata, metadata) - define @nearbyint_nxv1f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv1f16: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define @nearbyint_nxv1f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f16(, metadata, metadata) - define @nearbyint_nxv2f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv2f16: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @nearbyint_nxv2f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f16(, metadata, metadata) - define @nearbyint_nxv4f16( %v) strictfp { ; CHECK-LABEL: 
nearbyint_nxv4f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define @nearbyint_nxv4f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f16(, metadata, metadata) - define @nearbyint_nxv8f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv8f16: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define @nearbyint_nxv8f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv16f16(, metadata, metadata) - define @nearbyint_nxv16f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv16f16: ; CHECK: # %bb.0: @@ -129,8 +119,6 @@ define @nearbyint_nxv16f16( %v) strictf ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv32f16(, metadata, metadata) - define @nearbyint_nxv32f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv32f16: ; CHECK: # %bb.0: @@ -154,8 +142,6 @@ define @nearbyint_nxv32f16( %v) strictf ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv1f32(, metadata, metadata) - define @nearbyint_nxv1f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv1f32: ; CHECK: # %bb.0: @@ -178,8 +164,6 @@ define @nearbyint_nxv1f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f32(, metadata, metadata) - define @nearbyint_nxv2f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv2f32: ; CHECK: # %bb.0: @@ -202,8 +186,6 @@ define @nearbyint_nxv2f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f32(, metadata, metadata) - define @nearbyint_nxv4f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv4f32: ; CHECK: # %bb.0: @@ -226,8 +208,6 @@ define @nearbyint_nxv4f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f32(, metadata, metadata) - define @nearbyint_nxv8f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv8f32: ; CHECK: # %bb.0: @@ -250,8 +230,6 @@ define @nearbyint_nxv8f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv16f32(, metadata, metadata) - define @nearbyint_nxv16f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv16f32: ; CHECK: # 
%bb.0: @@ -274,8 +252,6 @@ define @nearbyint_nxv16f32( %v) stric ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv1f64(, metadata, metadata) - define @nearbyint_nxv1f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv1f64: ; RV32: # %bb.0: @@ -317,8 +293,6 @@ define @nearbyint_nxv1f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f64(, metadata, metadata) - define @nearbyint_nxv2f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv2f64: ; RV32: # %bb.0: @@ -360,8 +334,6 @@ define @nearbyint_nxv2f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f64(, metadata, metadata) - define @nearbyint_nxv4f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv4f64: ; RV32: # %bb.0: @@ -403,8 +375,6 @@ define @nearbyint_nxv4f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f64(, metadata, metadata) - define @nearbyint_nxv8f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll index 97e65f4e4b53a..8bfc002fa629b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll @@ -202,7 +202,6 @@ define @nearbyint_nxv1f16( %x) { %a = call @llvm.nearbyint.nxv1f16( %x) ret %a } -declare @llvm.nearbyint.nxv1f16() define @nearbyint_nxv2f16( %x) { ; ZVFH-LABEL: nearbyint_nxv2f16: @@ -242,7 +241,6 @@ define @nearbyint_nxv2f16( %x) { %a = call @llvm.nearbyint.nxv2f16( %x) ret %a } -declare @llvm.nearbyint.nxv2f16() define @nearbyint_nxv4f16( %x) { ; ZVFH-LABEL: nearbyint_nxv4f16: @@ -282,7 +280,6 @@ define @nearbyint_nxv4f16( %x) { %a = call @llvm.nearbyint.nxv4f16( %x) ret %a } -declare @llvm.nearbyint.nxv4f16() define @nearbyint_nxv8f16( %x) { ; ZVFH-LABEL: nearbyint_nxv8f16: @@ -322,7 +319,6 @@ define @nearbyint_nxv8f16( %x) { %a = call @llvm.nearbyint.nxv8f16( %x) ret %a } -declare @llvm.nearbyint.nxv8f16() define 
@nearbyint_nxv16f16( %x) { ; ZVFH-LABEL: nearbyint_nxv16f16: @@ -362,7 +358,6 @@ define @nearbyint_nxv16f16( %x) { %a = call @llvm.nearbyint.nxv16f16( %x) ret %a } -declare @llvm.nearbyint.nxv16f16() define @nearbyint_nxv32f16( %x) { ; ZVFH-LABEL: nearbyint_nxv32f16: @@ -416,7 +411,6 @@ define @nearbyint_nxv32f16( %x) { %a = call @llvm.nearbyint.nxv32f16( %x) ret %a } -declare @llvm.nearbyint.nxv32f16() define @nearbyint_nxv1f32( %x) { ; CHECK-LABEL: nearbyint_nxv1f32: @@ -436,7 +430,6 @@ define @nearbyint_nxv1f32( %x) { %a = call @llvm.nearbyint.nxv1f32( %x) ret %a } -declare @llvm.nearbyint.nxv1f32() define @nearbyint_nxv2f32( %x) { ; CHECK-LABEL: nearbyint_nxv2f32: @@ -456,7 +449,6 @@ define @nearbyint_nxv2f32( %x) { %a = call @llvm.nearbyint.nxv2f32( %x) ret %a } -declare @llvm.nearbyint.nxv2f32() define @nearbyint_nxv4f32( %x) { ; CHECK-LABEL: nearbyint_nxv4f32: @@ -476,7 +468,6 @@ define @nearbyint_nxv4f32( %x) { %a = call @llvm.nearbyint.nxv4f32( %x) ret %a } -declare @llvm.nearbyint.nxv4f32() define @nearbyint_nxv8f32( %x) { ; CHECK-LABEL: nearbyint_nxv8f32: @@ -496,7 +487,6 @@ define @nearbyint_nxv8f32( %x) { %a = call @llvm.nearbyint.nxv8f32( %x) ret %a } -declare @llvm.nearbyint.nxv8f32() define @nearbyint_nxv16f32( %x) { ; CHECK-LABEL: nearbyint_nxv16f32: @@ -516,7 +506,6 @@ define @nearbyint_nxv16f32( %x) { %a = call @llvm.nearbyint.nxv16f32( %x) ret %a } -declare @llvm.nearbyint.nxv16f32() define @nearbyint_nxv1f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv1f64: @@ -583,7 +572,6 @@ define @nearbyint_nxv1f64( %x) { %a = call @llvm.nearbyint.nxv1f64( %x) ret %a } -declare @llvm.nearbyint.nxv1f64() define @nearbyint_nxv2f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv2f64: @@ -650,7 +638,6 @@ define @nearbyint_nxv2f64( %x) { %a = call @llvm.nearbyint.nxv2f64( %x) ret %a } -declare @llvm.nearbyint.nxv2f64() define @nearbyint_nxv4f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv4f64: @@ -717,7 +704,6 @@ define @nearbyint_nxv4f64( %x) { %a = call @llvm.nearbyint.nxv4f64( 
%x) ret %a } -declare @llvm.nearbyint.nxv4f64() define @nearbyint_nxv8f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv8f64: @@ -784,4 +770,3 @@ define @nearbyint_nxv8f64( %x) { %a = call @llvm.nearbyint.nxv8f64( %x) ret %a } -declare @llvm.nearbyint.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll index 143545ccfd4f6..665ae1960affd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll @@ -324,25 +324,6 @@ entry: ret float %res } -; Function Attrs: nofree nosync nounwind readnone willreturn -declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) -declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) -declare i64 @llvm.umax.i64(i64, i64) -declare i64 @llvm.umin.i64(i64, i64) -declare i64 @llvm.smax.i64(i64, i64) -declare i64 @llvm.smin.i64(i64, i64) -declare float @llvm.maxnum.f32(float ,float) -declare float @llvm.minnum.f32(float ,float) - define void @crash(<2 x i32> %0) { ; CHECK-LABEL: crash: ; CHECK: # %bb.0: # %entry @@ -364,7 +345,6 @@ entry: store i8 %conv18.us, ptr null, align 1 ret void } -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) define i64 @op_then_reduce(<4 x i64> %v, <4 x i64> %v2) { ; CHECK-LABEL: op_then_reduce: @@ -382,7 +362,6 @@ entry: ret i64 %res } - define i64 @two_reduce_scalar_bypass(<4 x i64> %v, <4 x i64> %v2) { ; CHECK-LABEL: two_reduce_scalar_bypass: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll index 1d4d554d3a47d..9e0bf5f6f5261 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fadd.nxv1f64( %x, %y, %m, i32 %vl) - ; (fadd (fmul x, y), z)) -> (fma x, y, z) define @fma( %x, %y, %z, %m, i32 zeroext %vl) { ; CHECK-LABEL: fma: diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll index ab9adda516c07..de6ada6810bd8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll @@ -1,10 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fsub.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fneg.nxv1f64( %x, %m, i32 %vl) - ; (fsub (fmul x, y), z)) -> (fma x, y, (fneg z)) define @test1( %x, %y, %z, %m, i32 zeroext %vl) { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll index f597762521006..2add54fa35539 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll @@ -3569,8 +3569,6 @@ entry: ret <2 x i64> %conv6 } - - ; i32 saturate define <2 x i32> @stest_f64i32_mm(<2 x double> %x) { @@ -7214,21 +7212,3 @@ entry: ret <4 x i32> %spec.store.select7 } -declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>) -declare <2 x i32> 
@llvm.smax.v2i32(<2 x i32>, <2 x i32>) -declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>) -declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) -declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>) -declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>) -declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll index bc45671077106..c09d38d3347b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll @@ -6,14 +6,6 @@ ; Float -declare @llvm.fptosi.sat.nxv2f32.nxv2i32() -declare @llvm.fptosi.sat.nxv4f32.nxv4i32() -declare @llvm.fptosi.sat.nxv8f32.nxv8i32() -declare @llvm.fptosi.sat.nxv4f32.nxv4i16() -declare @llvm.fptosi.sat.nxv8f32.nxv8i16() -declare @llvm.fptosi.sat.nxv2f32.nxv2i64() -declare @llvm.fptosi.sat.nxv4f32.nxv4i64() - define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: @@ -104,14 +96,6 @@ define @test_signed_v4f32_v4i64( %f) { ; Double -declare @llvm.fptosi.sat.nxv2f64.nxv2i32() -declare @llvm.fptosi.sat.nxv4f64.nxv4i32() -declare @llvm.fptosi.sat.nxv8f64.nxv8i32() -declare @llvm.fptosi.sat.nxv4f64.nxv4i16() -declare @llvm.fptosi.sat.nxv8f64.nxv8i16() -declare @llvm.fptosi.sat.nxv2f64.nxv2i64() -declare 
@llvm.fptosi.sat.nxv4f64.nxv4i64() - define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: @@ -205,17 +189,8 @@ define @test_signed_v4f64_v4i64( %f) { ret %x } - ; half -declare @llvm.fptosi.sat.nxv2f16.nxv2i32() -declare @llvm.fptosi.sat.nxv4f16.nxv4i32() -declare @llvm.fptosi.sat.nxv8f16.nxv8i32() -declare @llvm.fptosi.sat.nxv4f16.nxv4i16() -declare @llvm.fptosi.sat.nxv8f16.nxv8i16() -declare @llvm.fptosi.sat.nxv2f16.nxv2i64() -declare @llvm.fptosi.sat.nxv4f16.nxv4i64() - define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll index bb5ad6ba9d88a..ff7b81dbf61fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll @@ -6,14 +6,6 @@ ; Float -declare @llvm.fptoui.sat.nxv2f32.nxv2i32() -declare @llvm.fptoui.sat.nxv4f32.nxv4i32() -declare @llvm.fptoui.sat.nxv8f32.nxv8i32() -declare @llvm.fptoui.sat.nxv4f32.nxv4i16() -declare @llvm.fptoui.sat.nxv8f32.nxv8i16() -declare @llvm.fptoui.sat.nxv2f32.nxv2i64() -declare @llvm.fptoui.sat.nxv4f32.nxv4i64() - define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: @@ -104,14 +96,6 @@ define @test_signed_v4f32_v4i64( %f) { ; Double -declare @llvm.fptoui.sat.nxv2f64.nxv2i32() -declare @llvm.fptoui.sat.nxv4f64.nxv4i32() -declare @llvm.fptoui.sat.nxv8f64.nxv8i32() -declare @llvm.fptoui.sat.nxv4f64.nxv4i16() -declare @llvm.fptoui.sat.nxv8f64.nxv8i16() -declare @llvm.fptoui.sat.nxv2f64.nxv2i64() -declare @llvm.fptoui.sat.nxv4f64.nxv4i64() - define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: @@ -205,17 +189,8 @@ define @test_signed_v4f64_v4i64( %f) { ret %x } - ; half -declare @llvm.fptoui.sat.nxv2f16.nxv2i32() -declare @llvm.fptoui.sat.nxv4f16.nxv4i32() -declare @llvm.fptoui.sat.nxv8f16.nxv8i32() -declare 
@llvm.fptoui.sat.nxv4f16.nxv4i16() -declare @llvm.fptoui.sat.nxv8f16.nxv8i16() -declare @llvm.fptoui.sat.nxv2f16.nxv2i64() -declare @llvm.fptoui.sat.nxv4f16.nxv4i64() - define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll index 5c592dd1a2d68..b1b8aac29b058 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll @@ -5,11 +5,6 @@ ; Done as a MIR test because eliminateFrameIndex will likely turn it ; back into an addi. -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - i64); - define i64 @test( %0) nounwind { ; CHECK-LABEL: name: test ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll index 5ed921d39590d..e7a856855c505 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll @@ -184,7 +184,6 @@ define @rint_nxv1f16( %x) { %a = call @llvm.rint.nxv1f16( %x) ret %a } -declare @llvm.rint.nxv1f16() define @rint_nxv2f16( %x) { ; ZVFH-LABEL: rint_nxv2f16: @@ -220,7 +219,6 @@ define @rint_nxv2f16( %x) { %a = call @llvm.rint.nxv2f16( %x) ret %a } -declare @llvm.rint.nxv2f16() define @rint_nxv4f16( %x) { ; ZVFH-LABEL: rint_nxv4f16: @@ -256,7 +254,6 @@ define @rint_nxv4f16( %x) { %a = call @llvm.rint.nxv4f16( %x) ret %a } -declare @llvm.rint.nxv4f16() define @rint_nxv8f16( %x) { ; ZVFH-LABEL: rint_nxv8f16: @@ -292,7 +289,6 @@ define @rint_nxv8f16( %x) { %a = call @llvm.rint.nxv8f16( %x) ret %a } -declare @llvm.rint.nxv8f16() define @rint_nxv16f16( %x) { ; ZVFH-LABEL: rint_nxv16f16: @@ -328,7 +324,6 @@ define @rint_nxv16f16( %x) { %a = call @llvm.rint.nxv16f16( %x) ret %a } -declare @llvm.rint.nxv16f16() define @rint_nxv32f16( %x) { ; ZVFH-LABEL: rint_nxv32f16: @@ -376,7 +371,6 @@ define @rint_nxv32f16( %x) { %a = call @llvm.rint.nxv32f16( %x) ret %a } 
-declare @llvm.rint.nxv32f16() define @rint_nxv1f32( %x) { ; CHECK-LABEL: rint_nxv1f32: @@ -394,7 +388,6 @@ define @rint_nxv1f32( %x) { %a = call @llvm.rint.nxv1f32( %x) ret %a } -declare @llvm.rint.nxv1f32() define @rint_nxv2f32( %x) { ; CHECK-LABEL: rint_nxv2f32: @@ -412,7 +405,6 @@ define @rint_nxv2f32( %x) { %a = call @llvm.rint.nxv2f32( %x) ret %a } -declare @llvm.rint.nxv2f32() define @rint_nxv4f32( %x) { ; CHECK-LABEL: rint_nxv4f32: @@ -430,7 +422,6 @@ define @rint_nxv4f32( %x) { %a = call @llvm.rint.nxv4f32( %x) ret %a } -declare @llvm.rint.nxv4f32() define @rint_nxv8f32( %x) { ; CHECK-LABEL: rint_nxv8f32: @@ -448,7 +439,6 @@ define @rint_nxv8f32( %x) { %a = call @llvm.rint.nxv8f32( %x) ret %a } -declare @llvm.rint.nxv8f32() define @rint_nxv16f32( %x) { ; CHECK-LABEL: rint_nxv16f32: @@ -466,7 +456,6 @@ define @rint_nxv16f32( %x) { %a = call @llvm.rint.nxv16f32( %x) ret %a } -declare @llvm.rint.nxv16f32() define @rint_nxv1f64( %x) { ; RV32ZVFH-LABEL: rint_nxv1f64: @@ -525,7 +514,6 @@ define @rint_nxv1f64( %x) { %a = call @llvm.rint.nxv1f64( %x) ret %a } -declare @llvm.rint.nxv1f64() define @rint_nxv2f64( %x) { ; RV32ZVFH-LABEL: rint_nxv2f64: @@ -584,7 +572,6 @@ define @rint_nxv2f64( %x) { %a = call @llvm.rint.nxv2f64( %x) ret %a } -declare @llvm.rint.nxv2f64() define @rint_nxv4f64( %x) { ; RV32ZVFH-LABEL: rint_nxv4f64: @@ -643,7 +630,6 @@ define @rint_nxv4f64( %x) { %a = call @llvm.rint.nxv4f64( %x) ret %a } -declare @llvm.rint.nxv4f64() define @rint_nxv8f64( %x) { ; RV32ZVFH-LABEL: rint_nxv8f64: @@ -702,4 +688,3 @@ define @rint_nxv8f64( %x) { %a = call @llvm.rint.nxv8f64( %x) ret %a } -declare @llvm.rint.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll index e5c5a83e9b2fd..c057d80a36be3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll @@ -3,12 +3,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -target-abi=lp64d \ ; RUN: 
-riscv-disable-frm-insert-opt < %s | FileCheck %s --check-prefix=UNOPT -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - i64, i64) - ; Test only save/restore frm once. define @test( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: test: @@ -452,7 +446,6 @@ entry: ; Test restoring frm before reading frm and doing nothing with following ; dynamic rounding mode operations. ; TODO: The frrm could be elided. -declare i32 @llvm.get.rounding() define @test5( %0, %1, i64 %2, ptr %p) nounwind { ; CHECK-LABEL: test5: ; CHECK: # %bb.0: # %entry @@ -502,7 +495,6 @@ entry: } ; Test not set FRM for vfadd with DYN after WriteFRMImm. -declare void @llvm.set.rounding(i32) define @after_fsrm1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: after_fsrm1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll index 295c264e7d924..91897ef7fbac3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll @@ -28,7 +28,6 @@ define @round_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f16(, metadata) define @round_nxv2f16( %x) strictfp { ; CHECK-LABEL: round_nxv2f16: @@ -52,7 +51,6 @@ define @round_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f16(, metadata) define @round_nxv4f16( %x) strictfp { ; CHECK-LABEL: round_nxv4f16: @@ -76,7 +74,6 @@ define @round_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f16(, metadata) define @round_nxv8f16( %x) strictfp { ; CHECK-LABEL: round_nxv8f16: @@ -100,7 +97,6 @@ define @round_nxv8f16( %x) strictfp { %a = call 
@llvm.experimental.constrained.round.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f16(, metadata) define @round_nxv16f16( %x) strictfp { ; CHECK-LABEL: round_nxv16f16: @@ -124,7 +120,6 @@ define @round_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv16f16(, metadata) define @round_nxv32f16( %x) strictfp { ; CHECK-LABEL: round_nxv32f16: @@ -148,7 +143,6 @@ define @round_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv32f16(, metadata) define @round_nxv1f32( %x) strictfp { ; CHECK-LABEL: round_nxv1f32: @@ -171,7 +165,6 @@ define @round_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f32(, metadata) define @round_nxv2f32( %x) strictfp { ; CHECK-LABEL: round_nxv2f32: @@ -194,7 +187,6 @@ define @round_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f32(, metadata) define @round_nxv4f32( %x) strictfp { ; CHECK-LABEL: round_nxv4f32: @@ -217,7 +209,6 @@ define @round_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f32(, metadata) define @round_nxv8f32( %x) strictfp { ; CHECK-LABEL: round_nxv8f32: @@ -240,7 +231,6 @@ define @round_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f32(, metadata) define @round_nxv16f32( %x) strictfp { ; CHECK-LABEL: round_nxv16f32: @@ -263,7 +253,6 @@ 
define @round_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.round.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv16f32(, metadata) define @round_nxv1f64( %x) strictfp { ; RV32-LABEL: round_nxv1f64: @@ -305,7 +294,6 @@ define @round_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f64(, metadata) define @round_nxv2f64( %x) strictfp { ; RV32-LABEL: round_nxv2f64: @@ -347,7 +335,6 @@ define @round_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f64(, metadata) define @round_nxv4f64( %x) strictfp { ; RV32-LABEL: round_nxv4f64: @@ -389,7 +376,6 @@ define @round_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f64(, metadata) define @round_nxv8f64( %x) strictfp { ; RV32-LABEL: round_nxv8f64: @@ -431,4 +417,3 @@ define @round_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll index d420636a573fe..0ebc2a82bd828 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll @@ -204,7 +204,6 @@ define @round_nxv1f16( %x) { %a = call @llvm.round.nxv1f16( %x) ret %a } -declare @llvm.round.nxv1f16() define @round_nxv2f16( %x) { ; ZVFH-LABEL: round_nxv2f16: @@ -244,7 +243,6 @@ define @round_nxv2f16( %x) { %a = call @llvm.round.nxv2f16( %x) ret %a } -declare @llvm.round.nxv2f16() define @round_nxv4f16( %x) { ; ZVFH-LABEL: round_nxv4f16: @@ -284,7 +282,6 @@ define 
@round_nxv4f16( %x) { %a = call @llvm.round.nxv4f16( %x) ret %a } -declare @llvm.round.nxv4f16() define @round_nxv8f16( %x) { ; ZVFH-LABEL: round_nxv8f16: @@ -324,7 +321,6 @@ define @round_nxv8f16( %x) { %a = call @llvm.round.nxv8f16( %x) ret %a } -declare @llvm.round.nxv8f16() define @round_nxv16f16( %x) { ; ZVFH-LABEL: round_nxv16f16: @@ -364,7 +360,6 @@ define @round_nxv16f16( %x) { %a = call @llvm.round.nxv16f16( %x) ret %a } -declare @llvm.round.nxv16f16() define @round_nxv32f16( %x) { ; ZVFH-LABEL: round_nxv32f16: @@ -418,7 +413,6 @@ define @round_nxv32f16( %x) { %a = call @llvm.round.nxv32f16( %x) ret %a } -declare @llvm.round.nxv32f16() define @round_nxv1f32( %x) { ; CHECK-LABEL: round_nxv1f32: @@ -438,7 +432,6 @@ define @round_nxv1f32( %x) { %a = call @llvm.round.nxv1f32( %x) ret %a } -declare @llvm.round.nxv1f32() define @round_nxv2f32( %x) { ; CHECK-LABEL: round_nxv2f32: @@ -458,7 +451,6 @@ define @round_nxv2f32( %x) { %a = call @llvm.round.nxv2f32( %x) ret %a } -declare @llvm.round.nxv2f32() define @round_nxv4f32( %x) { ; CHECK-LABEL: round_nxv4f32: @@ -478,7 +470,6 @@ define @round_nxv4f32( %x) { %a = call @llvm.round.nxv4f32( %x) ret %a } -declare @llvm.round.nxv4f32() define @round_nxv8f32( %x) { ; CHECK-LABEL: round_nxv8f32: @@ -498,7 +489,6 @@ define @round_nxv8f32( %x) { %a = call @llvm.round.nxv8f32( %x) ret %a } -declare @llvm.round.nxv8f32() define @round_nxv16f32( %x) { ; CHECK-LABEL: round_nxv16f32: @@ -518,7 +508,6 @@ define @round_nxv16f32( %x) { %a = call @llvm.round.nxv16f32( %x) ret %a } -declare @llvm.round.nxv16f32() define @round_nxv1f64( %x) { ; RV32ZVFH-LABEL: round_nxv1f64: @@ -585,7 +574,6 @@ define @round_nxv1f64( %x) { %a = call @llvm.round.nxv1f64( %x) ret %a } -declare @llvm.round.nxv1f64() define @round_nxv2f64( %x) { ; RV32ZVFH-LABEL: round_nxv2f64: @@ -652,7 +640,6 @@ define @round_nxv2f64( %x) { %a = call @llvm.round.nxv2f64( %x) ret %a } -declare @llvm.round.nxv2f64() define @round_nxv4f64( %x) { ; RV32ZVFH-LABEL: 
round_nxv4f64: @@ -719,7 +706,6 @@ define @round_nxv4f64( %x) { %a = call @llvm.round.nxv4f64( %x) ret %a } -declare @llvm.round.nxv4f64() define @round_nxv8f64( %x) { ; RV32ZVFH-LABEL: round_nxv8f64: @@ -786,4 +772,3 @@ define @round_nxv8f64( %x) { %a = call @llvm.round.nxv8f64( %x) ret %a } -declare @llvm.round.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll index de766895c734f..cd9d124e4b08c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll @@ -28,7 +28,6 @@ define @roundeven_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f16(, metadata) define @roundeven_nxv2f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv2f16: @@ -52,7 +51,6 @@ define @roundeven_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f16(, metadata) define @roundeven_nxv4f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv4f16: @@ -76,7 +74,6 @@ define @roundeven_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f16(, metadata) define @roundeven_nxv8f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv8f16: @@ -100,7 +97,6 @@ define @roundeven_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f16(, metadata) define @roundeven_nxv16f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv16f16: @@ -124,7 +120,6 @@ define @roundeven_nxv16f16( %x) strictf %a = call @llvm.experimental.constrained.roundeven.nxv16f16( %x, 
metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv16f16(, metadata) define @roundeven_nxv32f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv32f16: @@ -148,7 +143,6 @@ define @roundeven_nxv32f16( %x) strictf %a = call @llvm.experimental.constrained.roundeven.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv32f16(, metadata) define @roundeven_nxv1f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv1f32: @@ -171,7 +165,6 @@ define @roundeven_nxv1f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f32(, metadata) define @roundeven_nxv2f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv2f32: @@ -194,7 +187,6 @@ define @roundeven_nxv2f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f32(, metadata) define @roundeven_nxv4f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv4f32: @@ -217,7 +209,6 @@ define @roundeven_nxv4f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f32(, metadata) define @roundeven_nxv8f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv8f32: @@ -240,7 +231,6 @@ define @roundeven_nxv8f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f32(, metadata) define @roundeven_nxv16f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv16f32: @@ -263,7 +253,6 @@ define @roundeven_nxv16f32( %x) stric %a = call @llvm.experimental.constrained.roundeven.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv16f32(, metadata) define @roundeven_nxv1f64( %x) 
strictfp { ; RV32-LABEL: roundeven_nxv1f64: @@ -305,7 +294,6 @@ define @roundeven_nxv1f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f64(, metadata) define @roundeven_nxv2f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv2f64: @@ -347,7 +335,6 @@ define @roundeven_nxv2f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f64(, metadata) define @roundeven_nxv4f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv4f64: @@ -389,7 +376,6 @@ define @roundeven_nxv4f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f64(, metadata) define @roundeven_nxv8f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv8f64: @@ -431,4 +417,3 @@ define @roundeven_nxv8f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll index b9121c55684ee..5991f1a5cecfa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll @@ -203,7 +203,6 @@ define @roundeven_nxv1f16( %x) { %a = call @llvm.roundeven.nxv1f16( %x) ret %a } -declare @llvm.roundeven.nxv1f16() define @roundeven_nxv2f16( %x) { ; ZVFH-LABEL: roundeven_nxv2f16: @@ -243,7 +242,6 @@ define @roundeven_nxv2f16( %x) { %a = call @llvm.roundeven.nxv2f16( %x) ret %a } -declare @llvm.roundeven.nxv2f16() define @roundeven_nxv4f16( %x) { ; ZVFH-LABEL: roundeven_nxv4f16: @@ -283,7 +281,6 @@ define @roundeven_nxv4f16( %x) { %a = call @llvm.roundeven.nxv4f16( %x) ret %a } -declare @llvm.roundeven.nxv4f16() 
define @roundeven_nxv8f16( %x) { ; ZVFH-LABEL: roundeven_nxv8f16: @@ -323,7 +320,6 @@ define @roundeven_nxv8f16( %x) { %a = call @llvm.roundeven.nxv8f16( %x) ret %a } -declare @llvm.roundeven.nxv8f16() define @roundeven_nxv16f16( %x) { ; ZVFH-LABEL: roundeven_nxv16f16: @@ -363,7 +359,6 @@ define @roundeven_nxv16f16( %x) { %a = call @llvm.roundeven.nxv16f16( %x) ret %a } -declare @llvm.roundeven.nxv16f16() define @roundeven_nxv32f16( %x) { ; ZVFH-LABEL: roundeven_nxv32f16: @@ -417,7 +412,6 @@ define @roundeven_nxv32f16( %x) { %a = call @llvm.roundeven.nxv32f16( %x) ret %a } -declare @llvm.roundeven.nxv32f16() define @roundeven_nxv1f32( %x) { ; CHECK-LABEL: roundeven_nxv1f32: @@ -437,7 +431,6 @@ define @roundeven_nxv1f32( %x) { %a = call @llvm.roundeven.nxv1f32( %x) ret %a } -declare @llvm.roundeven.nxv1f32() define @roundeven_nxv2f32( %x) { ; CHECK-LABEL: roundeven_nxv2f32: @@ -457,7 +450,6 @@ define @roundeven_nxv2f32( %x) { %a = call @llvm.roundeven.nxv2f32( %x) ret %a } -declare @llvm.roundeven.nxv2f32() define @roundeven_nxv4f32( %x) { ; CHECK-LABEL: roundeven_nxv4f32: @@ -477,7 +469,6 @@ define @roundeven_nxv4f32( %x) { %a = call @llvm.roundeven.nxv4f32( %x) ret %a } -declare @llvm.roundeven.nxv4f32() define @roundeven_nxv8f32( %x) { ; CHECK-LABEL: roundeven_nxv8f32: @@ -497,7 +488,6 @@ define @roundeven_nxv8f32( %x) { %a = call @llvm.roundeven.nxv8f32( %x) ret %a } -declare @llvm.roundeven.nxv8f32() define @roundeven_nxv16f32( %x) { ; CHECK-LABEL: roundeven_nxv16f32: @@ -517,7 +507,6 @@ define @roundeven_nxv16f32( %x) { %a = call @llvm.roundeven.nxv16f32( %x) ret %a } -declare @llvm.roundeven.nxv16f32() define @roundeven_nxv1f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv1f64: @@ -584,7 +573,6 @@ define @roundeven_nxv1f64( %x) { %a = call @llvm.roundeven.nxv1f64( %x) ret %a } -declare @llvm.roundeven.nxv1f64() define @roundeven_nxv2f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv2f64: @@ -651,7 +639,6 @@ define @roundeven_nxv2f64( %x) { %a = call @llvm.roundeven.nxv2f64( 
%x) ret %a } -declare @llvm.roundeven.nxv2f64() define @roundeven_nxv4f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv4f64: @@ -718,7 +705,6 @@ define @roundeven_nxv4f64( %x) { %a = call @llvm.roundeven.nxv4f64( %x) ret %a } -declare @llvm.roundeven.nxv4f64() define @roundeven_nxv8f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv8f64: @@ -785,4 +771,3 @@ define @roundeven_nxv8f64( %x) { %a = call @llvm.roundeven.nxv8f64( %x) ret %a } -declare @llvm.roundeven.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll index 352fda91ab9fa..736dd1225da88 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fshr.nxv1i8(, , , , i32) define @fshr_v1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i8: ; CHECK: # %bb.0: @@ -19,7 +18,6 @@ define @fshr_v1i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i8(, , , , i32) define @fshl_v1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define @fshl_v1i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv2i8(, , , , i32) define @fshr_v2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i8: ; CHECK: # %bb.0: @@ -53,7 +50,6 @@ define @fshr_v2i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv2i8(, , , , i32) define @fshl_v2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i8: ; CHECK: # %bb.0: @@ -70,7 +66,6 @@ define @fshl_v2i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv4i8(, , , , i32) define @fshr_v4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i8: ; CHECK: # %bb.0: @@ -87,7 +82,6 @@ define @fshr_v4i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv4i8(, , , , i32) define @fshl_v4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fshl_v4i8: ; CHECK: # %bb.0: @@ -104,7 +98,6 @@ define @fshl_v4i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv8i8(, , , , i32) define @fshr_v8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i8: ; CHECK: # %bb.0: @@ -121,7 +114,6 @@ define @fshr_v8i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv8i8(, , , , i32) define @fshl_v8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i8: ; CHECK: # %bb.0: @@ -138,7 +130,6 @@ define @fshl_v8i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv16i8(, , , , i32) define @fshr_v16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i8: ; CHECK: # %bb.0: @@ -155,7 +146,6 @@ define @fshr_v16i8( %a, ret %res } -declare @llvm.vp.fshl.nxv16i8(, , , , i32) define @fshl_v16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i8: ; CHECK: # %bb.0: @@ -172,7 +162,6 @@ define @fshl_v16i8( %a, ret %res } -declare @llvm.vp.fshr.nxv32i8(, , , , i32) define @fshr_v32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i8: ; CHECK: # %bb.0: @@ -189,7 +178,6 @@ define @fshr_v32i8( %a, ret %res } -declare @llvm.vp.fshl.nxv32i8(, , , , i32) define @fshl_v32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v32i8: ; CHECK: # %bb.0: @@ -206,7 +194,6 @@ define @fshl_v32i8( %a, ret %res } -declare @llvm.vp.fshr.nxv64i8(, , , , i32) define @fshr_v64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v64i8: ; CHECK: # %bb.0: @@ -240,7 +227,6 @@ define @fshr_v64i8( %a, ret %res } -declare @llvm.vp.fshl.nxv64i8(, , , , i32) define @fshl_v64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v64i8: ; CHECK: # %bb.0: @@ -274,7 +260,6 @@ define @fshl_v64i8( %a, ret %res } -declare @llvm.vp.fshr.nxv1i16(, , , , i32) define @fshr_v1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i16: ; CHECK: # %bb.0: @@ -291,7 +276,6 @@ define @fshr_v1i16( %a, ret %res } -declare @llvm.vp.fshl.nxv1i16(, , , , i32) define @fshl_v1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: fshl_v1i16: ; CHECK: # %bb.0: @@ -308,7 +292,6 @@ define @fshl_v1i16( %a, ret %res } -declare @llvm.vp.fshr.nxv2i16(, , , , i32) define @fshr_v2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i16: ; CHECK: # %bb.0: @@ -325,7 +308,6 @@ define @fshr_v2i16( %a, ret %res } -declare @llvm.vp.fshl.nxv2i16(, , , , i32) define @fshl_v2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i16: ; CHECK: # %bb.0: @@ -342,7 +324,6 @@ define @fshl_v2i16( %a, ret %res } -declare @llvm.vp.fshr.nxv4i16(, , , , i32) define @fshr_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i16: ; CHECK: # %bb.0: @@ -359,7 +340,6 @@ define @fshr_v4i16( %a, ret %res } -declare @llvm.vp.fshl.nxv4i16(, , , , i32) define @fshl_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i16: ; CHECK: # %bb.0: @@ -376,7 +356,6 @@ define @fshl_v4i16( %a, ret %res } -declare @llvm.vp.fshr.nxv8i16(, , , , i32) define @fshr_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i16: ; CHECK: # %bb.0: @@ -393,7 +372,6 @@ define @fshr_v8i16( %a, ret %res } -declare @llvm.vp.fshl.nxv8i16(, , , , i32) define @fshl_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i16: ; CHECK: # %bb.0: @@ -410,7 +388,6 @@ define @fshl_v8i16( %a, ret %res } -declare @llvm.vp.fshr.nxv16i16(, , , , i32) define @fshr_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i16: ; CHECK: # %bb.0: @@ -427,7 +404,6 @@ define @fshr_v16i16( %a, %res } -declare @llvm.vp.fshl.nxv16i16(, , , , i32) define @fshl_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i16: ; CHECK: # %bb.0: @@ -444,7 +420,6 @@ define @fshl_v16i16( %a, %res } -declare @llvm.vp.fshr.nxv32i16(, , , , i32) define @fshr_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i16: ; CHECK: # %bb.0: @@ -478,7 +453,6 @@ define @fshr_v32i16( %a, %res } -declare @llvm.vp.fshl.nxv32i16(, , , , i32) define @fshl_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: fshl_v32i16: ; CHECK: # %bb.0: @@ -512,7 +486,6 @@ define @fshl_v32i16( %a, %res } -declare @llvm.vp.fshr.nxv1i32(, , , , i32) define @fshr_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i32: ; CHECK: # %bb.0: @@ -530,7 +503,6 @@ define @fshr_v1i32( %a, ret %res } -declare @llvm.vp.fshl.nxv1i32(, , , , i32) define @fshl_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i32: ; CHECK: # %bb.0: @@ -548,7 +520,6 @@ define @fshl_v1i32( %a, ret %res } -declare @llvm.vp.fshr.nxv2i32(, , , , i32) define @fshr_v2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i32: ; CHECK: # %bb.0: @@ -566,7 +537,6 @@ define @fshr_v2i32( %a, ret %res } -declare @llvm.vp.fshl.nxv2i32(, , , , i32) define @fshl_v2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i32: ; CHECK: # %bb.0: @@ -584,7 +554,6 @@ define @fshl_v2i32( %a, ret %res } -declare @llvm.vp.fshr.nxv4i32(, , , , i32) define @fshr_v4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i32: ; CHECK: # %bb.0: @@ -602,7 +571,6 @@ define @fshr_v4i32( %a, ret %res } -declare @llvm.vp.fshl.nxv4i32(, , , , i32) define @fshl_v4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i32: ; CHECK: # %bb.0: @@ -620,7 +588,6 @@ define @fshl_v4i32( %a, ret %res } -declare @llvm.vp.fshr.nxv8i32(, , , , i32) define @fshr_v8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i32: ; CHECK: # %bb.0: @@ -638,7 +605,6 @@ define @fshr_v8i32( %a, ret %res } -declare @llvm.vp.fshl.nxv8i32(, , , , i32) define @fshl_v8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i32: ; CHECK: # %bb.0: @@ -656,7 +622,6 @@ define @fshl_v8i32( %a, ret %res } -declare @llvm.vp.fshr.nxv16i32(, , , , i32) define @fshr_v16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i32: ; CHECK: # %bb.0: @@ -691,7 +656,6 @@ define @fshr_v16i32( %a, %res } -declare @llvm.vp.fshl.nxv16i32(, , , , i32) define @fshl_v16i32( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: fshl_v16i32: ; CHECK: # %bb.0: @@ -727,7 +691,6 @@ define @fshl_v16i32( %a, %res } -declare @llvm.vp.fshr.nxv1i64(, , , , i32) define @fshr_v1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i64: ; CHECK: # %bb.0: @@ -745,7 +708,6 @@ define @fshr_v1i64( %a, ret %res } -declare @llvm.vp.fshl.nxv1i64(, , , , i32) define @fshl_v1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i64: ; CHECK: # %bb.0: @@ -763,7 +725,6 @@ define @fshl_v1i64( %a, ret %res } -declare @llvm.vp.fshr.nxv2i64(, , , , i32) define @fshr_v2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i64: ; CHECK: # %bb.0: @@ -781,7 +742,6 @@ define @fshr_v2i64( %a, ret %res } -declare @llvm.vp.fshl.nxv2i64(, , , , i32) define @fshl_v2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i64: ; CHECK: # %bb.0: @@ -799,7 +759,6 @@ define @fshl_v2i64( %a, ret %res } -declare @llvm.vp.fshr.nxv4i64(, , , , i32) define @fshr_v4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i64: ; CHECK: # %bb.0: @@ -817,7 +776,6 @@ define @fshr_v4i64( %a, ret %res } -declare @llvm.vp.fshl.nxv4i64(, , , , i32) define @fshl_v4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i64: ; CHECK: # %bb.0: @@ -835,7 +793,6 @@ define @fshl_v4i64( %a, ret %res } -declare @llvm.vp.fshr.nxv7i64(, , , , i32) define @fshr_v7i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v7i64: ; CHECK: # %bb.0: @@ -870,7 +827,6 @@ define @fshr_v7i64( %a, ret %res } -declare @llvm.vp.fshl.nxv7i64(, , , , i32) define @fshl_v7i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v7i64: ; CHECK: # %bb.0: @@ -906,7 +862,6 @@ define @fshl_v7i64( %a, ret %res } -declare @llvm.vp.fshr.nxv8i64(, , , , i32) define @fshr_v8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i64: ; CHECK: # %bb.0: @@ -941,7 +896,6 @@ define @fshr_v8i64( %a, ret %res } -declare @llvm.vp.fshl.nxv8i64(, , , , i32) define @fshl_v8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: fshl_v8i64: ; CHECK: # %bb.0: @@ -977,7 +931,6 @@ define @fshl_v8i64( %a, ret %res } -declare @llvm.vp.fshr.nxv16i64(, , , , i32) define @fshr_v16i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i64: ; CHECK: # %bb.0: @@ -1082,7 +1035,6 @@ define @fshr_v16i64( %a, %res } -declare @llvm.vp.fshl.nxv16i64(, , , , i32) define @fshl_v16i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i64: ; CHECK: # %bb.0: @@ -1171,7 +1123,6 @@ define @fshl_v16i64( %a, @llvm.vp.fshr.nxv1i9(, , , , i32) define @fshr_v1i9( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i9: ; CHECK: # %bb.0: @@ -1194,7 +1145,6 @@ define @fshr_v1i9( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i9(, , , , i32) define @fshl_v1i9( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i9: ; CHECK: # %bb.0: @@ -1216,9 +1166,6 @@ define @fshl_v1i9( %a, %b, ret %res } -declare @llvm.vp.trunc.nxv1i4.nxv1i8(, , i32) -declare @llvm.vp.zext.nxv1i8.nxv1i4(, , i32) -declare @llvm.vp.fshr.nxv1i4(, , , , i32) define @fshr_v1i4( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i4: ; CHECK: # %bb.0: @@ -1240,7 +1187,6 @@ define @fshr_v1i4( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i4(, , , , i32) define @fshl_v1i4( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i4: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll index eae21a76f3f00..fa3ce9428c350 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll @@ -36,5 +36,3 @@ define @fshl( %a, %b, %res } -declare @llvm.fshr.v4i32( %a, %b, %c) -declare @llvm.fshl.v4i32( %a, %b, %c) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll index 63cb72e8795e1..adeee2bd82b57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll @@ -24,7 +24,6 @@ 
define @trunc_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f16(, metadata) define @trunc_nxv2f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv2f16: @@ -46,7 +45,6 @@ define @trunc_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f16(, metadata) define @trunc_nxv4f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv4f16: @@ -68,7 +66,6 @@ define @trunc_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f16(, metadata) define @trunc_nxv8f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv8f16: @@ -90,7 +87,6 @@ define @trunc_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f16(, metadata) define @trunc_nxv16f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv16f16: @@ -112,7 +108,6 @@ define @trunc_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv16f16(, metadata) define @trunc_nxv32f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv32f16: @@ -134,7 +129,6 @@ define @trunc_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv32f16(, metadata) define @trunc_nxv1f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv1f32: @@ -155,7 +149,6 @@ define @trunc_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f32(, metadata) define @trunc_nxv2f32( %x) strictfp { ; CHECK-LABEL: 
trunc_nxv2f32: @@ -176,7 +169,6 @@ define @trunc_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f32(, metadata) define @trunc_nxv4f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv4f32: @@ -197,7 +189,6 @@ define @trunc_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f32(, metadata) define @trunc_nxv8f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv8f32: @@ -218,7 +209,6 @@ define @trunc_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f32(, metadata) define @trunc_nxv16f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv16f32: @@ -239,7 +229,6 @@ define @trunc_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.trunc.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv16f32(, metadata) define @trunc_nxv1f64( %x) strictfp { ; RV32-LABEL: trunc_nxv1f64: @@ -277,7 +266,6 @@ define @trunc_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f64(, metadata) define @trunc_nxv2f64( %x) strictfp { ; RV32-LABEL: trunc_nxv2f64: @@ -315,7 +303,6 @@ define @trunc_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f64(, metadata) define @trunc_nxv4f64( %x) strictfp { ; RV32-LABEL: trunc_nxv4f64: @@ -353,7 +340,6 @@ define @trunc_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f64(, metadata) define @trunc_nxv8f64( 
%x) strictfp { ; RV32-LABEL: trunc_nxv8f64: @@ -391,4 +377,3 @@ define @trunc_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll index 34b3e8d2849b7..811f2a526ac47 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -184,7 +184,6 @@ define @trunc_nxv1f16( %x) { %a = call @llvm.trunc.nxv1f16( %x) ret %a } -declare @llvm.trunc.nxv1f16() define @trunc_nxv2f16( %x) { ; ZVFH-LABEL: trunc_nxv2f16: @@ -220,7 +219,6 @@ define @trunc_nxv2f16( %x) { %a = call @llvm.trunc.nxv2f16( %x) ret %a } -declare @llvm.trunc.nxv2f16() define @trunc_nxv4f16( %x) { ; ZVFH-LABEL: trunc_nxv4f16: @@ -256,7 +254,6 @@ define @trunc_nxv4f16( %x) { %a = call @llvm.trunc.nxv4f16( %x) ret %a } -declare @llvm.trunc.nxv4f16() define @trunc_nxv8f16( %x) { ; ZVFH-LABEL: trunc_nxv8f16: @@ -292,7 +289,6 @@ define @trunc_nxv8f16( %x) { %a = call @llvm.trunc.nxv8f16( %x) ret %a } -declare @llvm.trunc.nxv8f16() define @trunc_nxv16f16( %x) { ; ZVFH-LABEL: trunc_nxv16f16: @@ -328,7 +324,6 @@ define @trunc_nxv16f16( %x) { %a = call @llvm.trunc.nxv16f16( %x) ret %a } -declare @llvm.trunc.nxv16f16() define @trunc_nxv32f16( %x) { ; ZVFH-LABEL: trunc_nxv32f16: @@ -376,7 +371,6 @@ define @trunc_nxv32f16( %x) { %a = call @llvm.trunc.nxv32f16( %x) ret %a } -declare @llvm.trunc.nxv32f16() define @trunc_nxv1f32( %x) { ; CHECK-LABEL: trunc_nxv1f32: @@ -394,7 +388,6 @@ define @trunc_nxv1f32( %x) { %a = call @llvm.trunc.nxv1f32( %x) ret %a } -declare @llvm.trunc.nxv1f32() define @trunc_nxv2f32( %x) { ; CHECK-LABEL: trunc_nxv2f32: @@ -412,7 +405,6 @@ define @trunc_nxv2f32( %x) { %a = call @llvm.trunc.nxv2f32( %x) ret %a } -declare @llvm.trunc.nxv2f32() define @trunc_nxv4f32( %x) { ; CHECK-LABEL: trunc_nxv4f32: @@ -430,7 
+422,6 @@ define @trunc_nxv4f32( %x) { %a = call @llvm.trunc.nxv4f32( %x) ret %a } -declare @llvm.trunc.nxv4f32() define @trunc_nxv8f32( %x) { ; CHECK-LABEL: trunc_nxv8f32: @@ -448,7 +439,6 @@ define @trunc_nxv8f32( %x) { %a = call @llvm.trunc.nxv8f32( %x) ret %a } -declare @llvm.trunc.nxv8f32() define @trunc_nxv16f32( %x) { ; CHECK-LABEL: trunc_nxv16f32: @@ -466,7 +456,6 @@ define @trunc_nxv16f32( %x) { %a = call @llvm.trunc.nxv16f32( %x) ret %a } -declare @llvm.trunc.nxv16f32() define @trunc_nxv1f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv1f64: @@ -525,7 +514,6 @@ define @trunc_nxv1f64( %x) { %a = call @llvm.trunc.nxv1f64( %x) ret %a } -declare @llvm.trunc.nxv1f64() define @trunc_nxv2f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv2f64: @@ -584,7 +572,6 @@ define @trunc_nxv2f64( %x) { %a = call @llvm.trunc.nxv2f64( %x) ret %a } -declare @llvm.trunc.nxv2f64() define @trunc_nxv4f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv4f64: @@ -643,7 +630,6 @@ define @trunc_nxv4f64( %x) { %a = call @llvm.trunc.nxv4f64( %x) ret %a } -declare @llvm.trunc.nxv4f64() define @trunc_nxv8f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv8f64: @@ -702,4 +688,3 @@ define @trunc_nxv8f64( %x) { %a = call @llvm.trunc.nxv8f64( %x) ret %a } -declare @llvm.trunc.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll b/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll index aea688f03cf72..3223bf108ab4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll +++ b/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll @@ -2,10 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i32 @llvm.experimental.get.vector.length.i16(i16, i32, i1) -declare i32 @llvm.experimental.get.vector.length.i32(i32, i32, i1) -declare i32 @llvm.experimental.get.vector.length.i64(i64, i32, i1) - define i32 
@vector_length_i16(i16 zeroext %tc) { ; CHECK-LABEL: vector_length_i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll index d7bf566b9b5f4..6413b914b6440 100644 --- a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f16() - define @trunc_nxv1f16_to_si8( %x) { ; CHECK-LABEL: trunc_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -128,8 +126,6 @@ define @trunc_nxv1f16_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f16() - define @trunc_nxv4f16_to_si8( %x) { ; CHECK-LABEL: trunc_nxv4f16_to_si8: ; CHECK: # %bb.0: @@ -248,8 +244,6 @@ define @trunc_nxv4f16_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f16() - define @ceil_nxv1f16_to_si8( %x) { ; CHECK-LABEL: ceil_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -452,8 +446,6 @@ define @ceil_nxv1f16_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f16() - define @ceil_nxv4f16_to_si8( %x) { ; CHECK-LABEL: ceil_nxv4f16_to_si8: ; CHECK: # %bb.0: @@ -656,8 +648,6 @@ define @ceil_nxv4f16_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv1f16() - define @rint_nxv1f16_to_si8( %x) { ; CHECK-LABEL: rint_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -844,8 +834,6 @@ define @rint_nxv1f16_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f16() - define @rint_nxv4f16_to_si8( %x) { ; CHECK-LABEL: rint_nxv4f16_to_si8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll 
b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll index 9475989d46343..c39630ae07e27 100644 --- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll @@ -19,4 +19,3 @@ define @vpload_nxv8i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i64.p0(ptr, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll index 0135ce790610d..962fa729722cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -471,8 +471,6 @@ define @insert_nxv4i1_nxv1i1_2( %v, %vec } -declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64) - define void @insert_nxv8i64_nxv16i64( %sv0, %sv1, ptr %out) { ; CHECK-LABEL: insert_nxv8i64_nxv16i64: ; CHECK: # %bb.0: @@ -642,25 +640,3 @@ define @insert_splat_to_splat2() { attributes #0 = { vscale_range(2,1024) } -declare @llvm.vector.insert.nxv1i1.nxv4i1(, , i64) -declare @llvm.vector.insert.nxv8i1.nxv32i1(, , i64) - -declare @llvm.vector.insert.nxv1i8.nxv16i8(, , i64) - -declare @llvm.vector.insert.nxv1f16.nxv32f16(, , i64) -declare @llvm.vector.insert.nxv2f16.nxv32f16(, , i64) - -declare @llvm.vector.insert.nxv1i8.nxv4i8(, , i64 %idx) - -declare @llvm.vector.insert.nxv2i32.nxv4i32(, , i64) -declare @llvm.vector.insert.nxv4i32.v2i32(, <2 x i32>, i64) - -declare @llvm.vector.insert.nxv2i32.nxv8i32(, , i64 %idx) -declare @llvm.vector.insert.nxv4i32.nxv8i32(, , i64 %idx) - -declare @llvm.vector.insert.nxv1i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv2i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv4i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv8i32.nxv16i32(, , i64 %idx) - -declare @llvm.vector.insert.nxv2i64.v3i64(, <3 x i64>, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll index 6bc934cbdf0d8..423406b511261 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll @@ -14,7 +14,6 @@ define @llrint_nxv1i64_nxv1f32( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f32( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f32() define @llrint_nxv2i64_nxv2f32( %x) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f32: @@ -26,7 +25,6 @@ define @llrint_nxv2i64_nxv2f32( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f32( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f32() define @llrint_nxv4i64_nxv4f32( %x) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f32: @@ -38,7 +36,6 @@ define @llrint_nxv4i64_nxv4f32( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f32( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f32() define @llrint_nxv8i64_nxv8f32( %x) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f32: @@ -50,7 +47,6 @@ define @llrint_nxv8i64_nxv8f32( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f32( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8f32() define @llrint_nxv16i64_nxv16f32( %x) { ; CHECK-LABEL: llrint_nxv16i64_nxv16f32: @@ -63,7 +59,6 @@ define @llrint_nxv16i64_nxv16f32( %x) { %a = call @llvm.llrint.nxv16i64.nxv16f32( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16f32() define @llrint_nxv1i64_nxv1f64( %x) { ; CHECK-LABEL: llrint_nxv1i64_nxv1f64: @@ -74,7 +69,6 @@ define @llrint_nxv1i64_nxv1f64( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f64( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f64() define @llrint_nxv2i64_nxv2f64( %x) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f64: @@ -85,7 +79,6 @@ define @llrint_nxv2i64_nxv2f64( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f64( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f64() define @llrint_nxv4i64_nxv4f64( %x) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f64: @@ -96,7 +89,6 @@ define @llrint_nxv4i64_nxv4f64( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f64( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f64() define @llrint_nxv8i64_nxv8f64( %x) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f64: @@ -107,7 +99,6 @@ define @llrint_nxv8i64_nxv8f64( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f64( %x) ret %a } -declare 
@llvm.llrint.nxv8i64.nxv8f64() define @llrint_nxv1f16( %x) { ; CHECK-LABEL: llrint_nxv1f16: @@ -120,7 +111,6 @@ define @llrint_nxv1f16( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f16( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f16() define @llrint_nxv2f16( %x) { ; CHECK-LABEL: llrint_nxv2f16: @@ -133,7 +123,6 @@ define @llrint_nxv2f16( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f16( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f16() define @llrint_nxv4f16( %x) { ; CHECK-LABEL: llrint_nxv4f16: @@ -146,7 +135,6 @@ define @llrint_nxv4f16( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f16( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f16() define @llrint_nxv8f16( %x) { ; CHECK-LABEL: llrint_nxv8f16: @@ -159,7 +147,6 @@ define @llrint_nxv8f16( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f16( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8f16() define @llrint_nxv16f16( %x) { ; CHECK-LABEL: llrint_nxv16f16: @@ -174,7 +161,6 @@ define @llrint_nxv16f16( %x) { %a = call @llvm.llrint.nxv16i64.nxv16f16( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16f16() define @llrint_nxv1bf16( %x) { ; CHECK-LABEL: llrint_nxv1bf16: @@ -187,7 +173,6 @@ define @llrint_nxv1bf16( %x) { %a = call @llvm.llrint.nxv1i64.nxv1bf16( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1bf16() define @llrint_nxv2bf16( %x) { ; CHECK-LABEL: llrint_nxv2bf16: @@ -200,7 +185,6 @@ define @llrint_nxv2bf16( %x) { %a = call @llvm.llrint.nxv2i64.nxv2bf16( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2bf16() define @llrint_nxv4bf16( %x) { ; CHECK-LABEL: llrint_nxv4bf16: @@ -213,7 +197,6 @@ define @llrint_nxv4bf16( %x) { %a = call @llvm.llrint.nxv4i64.nxv4bf16( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4bf16() define @llrint_nxv8bf16( %x) { ; CHECK-LABEL: llrint_nxv8bf16: @@ -226,7 +209,6 @@ define @llrint_nxv8bf16( %x) { %a = call @llvm.llrint.nxv8i64.nxv8bf16( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8bf16() define @llrint_nxv16bf16( %x) { ; CHECK-LABEL: llrint_nxv16bf16: @@ -241,4 +223,3 @@ define 
@llrint_nxv16bf16( %x) { %a = call @llvm.llrint.nxv16i64.nxv16bf16( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll index dbe2d03e1a909..c0a794afac3ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll @@ -14,7 +14,6 @@ define @llrint_nxv1i64_nxv1f32( %x, @llvm.vp.llrint.nxv1i64.nxv1f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv1i64.nxv1f32(, , i32) define @llrint_nxv2i64_nxv2f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f32: @@ -26,7 +25,6 @@ define @llrint_nxv2i64_nxv2f32( %x, @llvm.vp.llrint.nxv2i64.nxv2f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv2i64.nxv2f32(, , i32) define @llrint_nxv4i64_nxv4f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f32: @@ -38,7 +36,6 @@ define @llrint_nxv4i64_nxv4f32( %x, @llvm.vp.llrint.nxv4i64.nxv4f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv4i64.nxv4f32(, , i32) define @llrint_nxv8i64_nxv8f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f32: @@ -50,7 +47,6 @@ define @llrint_nxv8i64_nxv8f32( %x, @llvm.vp.llrint.nxv8i64.nxv8f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv8i64.nxv8f32(, , i32) define @llrint_nxv16i64_nxv16f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv16i64_nxv16f32: @@ -78,7 +74,6 @@ define @llrint_nxv16i64_nxv16f32( %x, < %a = call @llvm.vp.llrint.nxv16i64.nxv16f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv16i64.nxv16f32(, , i32) define @llrint_nxv1i64_nxv1f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv1i64_nxv1f64: @@ -89,7 +84,6 @@ define @llrint_nxv1i64_nxv1f64( %x, @llvm.vp.llrint.nxv1i64.nxv1f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv1i64.nxv1f64(, , i32) define @llrint_nxv2i64_nxv2f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f64: @@ -100,7 +94,6 @@ define 
@llrint_nxv2i64_nxv2f64( %x, @llvm.vp.llrint.nxv2i64.nxv2f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv2i64.nxv2f64(, , i32) define @llrint_nxv4i64_nxv4f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f64: @@ -111,7 +104,6 @@ define @llrint_nxv4i64_nxv4f64( %x, @llvm.vp.llrint.nxv4i64.nxv4f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv4i64.nxv4f64(, , i32) define @llrint_nxv8i64_nxv8f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f64: @@ -122,4 +114,3 @@ define @llrint_nxv8i64_nxv8f64( %x, @llvm.vp.llrint.nxv8i64.nxv8f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv8i64.nxv8f64(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll index 5b4c7ba91400f..61cf3da7757f6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll @@ -16,7 +16,6 @@ define @llround_nxv1i64_nxv1f32( %x) { %a = call @llvm.llround.nxv1i64.nxv1f32( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f32() define @llround_nxv2i64_nxv2f32( %x) { ; CHECK-LABEL: llround_nxv2i64_nxv2f32: @@ -30,7 +29,6 @@ define @llround_nxv2i64_nxv2f32( %x) { %a = call @llvm.llround.nxv2i64.nxv2f32( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f32() define @llround_nxv4i64_nxv4f32( %x) { ; CHECK-LABEL: llround_nxv4i64_nxv4f32: @@ -44,7 +42,6 @@ define @llround_nxv4i64_nxv4f32( %x) { %a = call @llvm.llround.nxv4i64.nxv4f32( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f32() define @llround_nxv8i64_nxv8f32( %x) { ; CHECK-LABEL: llround_nxv8i64_nxv8f32: @@ -58,7 +55,6 @@ define @llround_nxv8i64_nxv8f32( %x) { %a = call @llvm.llround.nxv8i64.nxv8f32( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f32() define @llround_nxv16i64_nxv16f32( %x) { ; CHECK-LABEL: llround_nxv16i64_nxv16f32: @@ -73,7 +69,6 @@ define @llround_nxv16i64_nxv16f32( %x) %a = call @llvm.llround.nxv16i64.nxv16f32( %x) ret %a } -declare 
@llvm.llround.nxv16i64.nxv16f32() define @llround_nxv1i64_nxv1f64( %x) { ; CHECK-LABEL: llround_nxv1i64_nxv1f64: @@ -86,7 +81,6 @@ define @llround_nxv1i64_nxv1f64( %x) { %a = call @llvm.llround.nxv1i64.nxv1f64( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f64() define @llround_nxv2i64_nxv2f64( %x) { ; CHECK-LABEL: llround_nxv2i64_nxv2f64: @@ -99,7 +93,6 @@ define @llround_nxv2i64_nxv2f64( %x) { %a = call @llvm.llround.nxv2i64.nxv2f64( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f64() define @llround_nxv4i64_nxv4f64( %x) { ; CHECK-LABEL: llround_nxv4i64_nxv4f64: @@ -112,7 +105,6 @@ define @llround_nxv4i64_nxv4f64( %x) { %a = call @llvm.llround.nxv4i64.nxv4f64( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f64() define @llround_nxv8i64_nxv8f64( %x) { ; CHECK-LABEL: llround_nxv8i64_nxv8f64: @@ -125,7 +117,6 @@ define @llround_nxv8i64_nxv8f64( %x) { %a = call @llvm.llround.nxv8i64.nxv8f64( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f64() define @llround_nxv1f16( %x) { ; CHECK-LABEL: llround_nxv1f16: @@ -140,7 +131,6 @@ define @llround_nxv1f16( %x) { %a = call @llvm.llround.nxv1i64.nxv1f16( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f16() define @llround_nxv2f16( %x) { ; CHECK-LABEL: llround_nxv2f16: @@ -155,7 +145,6 @@ define @llround_nxv2f16( %x) { %a = call @llvm.llround.nxv2i64.nxv2f16( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f16() define @llround_nxv4f16( %x) { ; CHECK-LABEL: llround_nxv4f16: @@ -170,7 +159,6 @@ define @llround_nxv4f16( %x) { %a = call @llvm.llround.nxv4i64.nxv4f16( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f16() define @llround_nxv8f16( %x) { ; CHECK-LABEL: llround_nxv8f16: @@ -185,7 +173,6 @@ define @llround_nxv8f16( %x) { %a = call @llvm.llround.nxv8i64.nxv8f16( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f16() define @llround_nxv16f16( %x) { ; CHECK-LABEL: llround_nxv16f16: @@ -202,7 +189,6 @@ define @llround_nxv16f16( %x) { %a = call @llvm.llround.nxv16i64.nxv16f16( %x) ret %a } -declare 
@llvm.llround.nxv16i64.nxv16f16() define @llround_nxv1bf16( %x) { ; CHECK-LABEL: llround_nxv1bf16: @@ -217,7 +203,6 @@ define @llround_nxv1bf16( %x) { %a = call @llvm.llround.nxv1i64.nxv1bf16( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1bf16() define @llround_nxv2bf16( %x) { ; CHECK-LABEL: llround_nxv2bf16: @@ -232,7 +217,6 @@ define @llround_nxv2bf16( %x) { %a = call @llvm.llround.nxv2i64.nxv2bf16( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2bf16() define @llround_nxv4bf16( %x) { ; CHECK-LABEL: llround_nxv4bf16: @@ -247,7 +231,6 @@ define @llround_nxv4bf16( %x) { %a = call @llvm.llround.nxv4i64.nxv4bf16( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4bf16() define @llround_nxv8bf16( %x) { ; CHECK-LABEL: llround_nxv8bf16: @@ -262,7 +245,6 @@ define @llround_nxv8bf16( %x) { %a = call @llvm.llround.nxv8i64.nxv8bf16( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8bf16() define @llround_nxv16bf16( %x) { ; CHECK-LABEL: llround_nxv16bf16: @@ -279,4 +261,3 @@ define @llround_nxv16bf16( %x) { %a = call @llvm.llround.nxv16i64.nxv16bf16( %x) ret %a } -declare @llvm.llround.nxv16i64.nxv16bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll index 6df738fd72854..ba71f9d7321c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll @@ -28,7 +28,6 @@ define @lrint_nxv1f32( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f32( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f32() define @lrint_nxv2f32( %x) { ; RV32-LABEL: lrint_nxv2f32: @@ -52,7 +51,6 @@ define @lrint_nxv2f32( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f32( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f32() define @lrint_nxv4f32( %x) { ; RV32-LABEL: lrint_nxv4f32: @@ -76,7 +74,6 @@ define @lrint_nxv4f32( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f32( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f32() define @lrint_nxv8f32( %x) { ; RV32-LABEL: lrint_nxv8f32: @@ -100,7 +97,6 @@ define @lrint_nxv8f32( 
%x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f32( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f32() define @lrint_nxv16f32( %x) { ; RV32-LABEL: lrint_nxv16f32: @@ -125,7 +121,6 @@ define @lrint_nxv16f32( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16f32( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16f32() define @lrint_nxv1f64( %x) { ; RV32-LABEL: lrint_nxv1f64: @@ -150,7 +145,6 @@ define @lrint_nxv1f64( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f64( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f64() define @lrint_nxv2f64( %x) { ; RV32-LABEL: lrint_nxv2f64: @@ -175,7 +169,6 @@ define @lrint_nxv2f64( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f64( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f64() define @lrint_nxv4f64( %x) { ; RV32-LABEL: lrint_nxv4f64: @@ -200,7 +193,6 @@ define @lrint_nxv4f64( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f64( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f64() define @lrint_nxv8f64( %x) { ; RV32-LABEL: lrint_nxv8f64: @@ -225,7 +217,6 @@ define @lrint_nxv8f64( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f64( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f64() define @lrint_nxv1f16( %x) { ; RV32-LABEL: lrint_nxv1f16: @@ -254,7 +245,6 @@ define @lrint_nxv1f16( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f16( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f16() define @lrint_nxv2f16( %x) { ; RV32-LABEL: lrint_nxv2f16: @@ -283,7 +273,6 @@ define @lrint_nxv2f16( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f16( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f16() define @lrint_nxv4f16( %x) { ; RV32-LABEL: lrint_nxv4f16: @@ -312,7 +301,6 @@ define @lrint_nxv4f16( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f16( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f16() define @lrint_nxv8f16( %x) { ; RV32-LABEL: lrint_nxv8f16: @@ -341,7 +329,6 @@ define @lrint_nxv8f16( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f16( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f16() define @lrint_nxv16f16( %x) { ; RV32-LABEL: lrint_nxv16f16: @@ -372,7 
+359,6 @@ define @lrint_nxv16f16( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16f16( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16f16() define @lrint_nxv1bf16( %x) { ; RV32-LABEL: lrint_nxv1bf16: @@ -401,7 +387,6 @@ define @lrint_nxv1bf16( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1bf16( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1bf16() define @lrint_nxv2bf16( %x) { ; RV32-LABEL: lrint_nxv2bf16: @@ -430,7 +415,6 @@ define @lrint_nxv2bf16( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2bf16( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2bf16() define @lrint_nxv4bf16( %x) { ; RV32-LABEL: lrint_nxv4bf16: @@ -459,7 +443,6 @@ define @lrint_nxv4bf16( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4bf16( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4bf16() define @lrint_nxv8bf16( %x) { ; RV32-LABEL: lrint_nxv8bf16: @@ -488,7 +471,6 @@ define @lrint_nxv8bf16( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8bf16( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8bf16() define @lrint_nxv16bf16( %x) { ; RV32-LABEL: lrint_nxv16bf16: @@ -519,7 +501,6 @@ define @lrint_nxv16bf16( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16bf16( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16bf16() define @lrint_nxv32bf16( %x) { ; RV32-LABEL: lrint_nxv32bf16: @@ -636,4 +617,3 @@ define @lrint_nxv32bf16( %x) { %a = call @llvm.lrint.nxv32iXLen.nxv32bf16( %x) ret %a } -declare @llvm.lrint.nxv32iXLen.nxv32bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll index 98d32b36c23c1..c09df1a60d2ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll @@ -28,7 +28,6 @@ define @lrint_nxv1f32( %x, @llvm.vp.lrint.nxv1iXLen.nxv1f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv1iXLen.nxv1f32(, , i32) define @lrint_nxv2f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv2f32: @@ -52,7 +51,6 @@ define @lrint_nxv2f32( %x, @llvm.vp.lrint.nxv2iXLen.nxv2f32( %x, %m, i32 %evl) ret %a } -declare 
@llvm.vp.lrint.nxv2iXLen.nxv2f32(, , i32) define @lrint_nxv4f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv4f32: @@ -76,7 +74,6 @@ define @lrint_nxv4f32( %x, @llvm.vp.lrint.nxv4iXLen.nxv4f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv4iXLen.nxv4f32(, , i32) define @lrint_nxv8f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv8f32: @@ -100,7 +97,6 @@ define @lrint_nxv8f32( %x, @llvm.vp.lrint.nxv8iXLen.nxv8f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv8iXLen.nxv8f32(, , i32) define @lrint_nxv16f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv16f32: @@ -140,7 +136,6 @@ define @lrint_nxv16f32( %x, @llvm.vp.lrint.nxv16iXLen.nxv16f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv16iXLen.nxv16f32(, , i32) define @lrint_nxv1f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv1f64: @@ -165,7 +160,6 @@ define @lrint_nxv1f64( %x, @llvm.vp.lrint.nxv1iXLen.nxv1f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv1iXLen.nxv1f64(, , i32) define @lrint_nxv2f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv2f64: @@ -190,7 +184,6 @@ define @lrint_nxv2f64( %x, @llvm.vp.lrint.nxv2iXLen.nxv2f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv2iXLen.nxv2f64(, , i32) define @lrint_nxv4f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv4f64: @@ -215,7 +208,6 @@ define @lrint_nxv4f64( %x, @llvm.vp.lrint.nxv4iXLen.nxv4f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv4iXLen.nxv4f64(, , i32) define @lrint_nxv8f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv8f64: @@ -240,4 +232,3 @@ define @lrint_nxv8f64( %x, @llvm.vp.lrint.nxv8iXLen.nxv8f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv8iXLen.nxv8f64(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll index 109b9055e7b55..03e18738a491d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll @@ -34,7 +34,6 @@ define 
@lround_nxv1f32( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f32( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f32() define @lround_nxv2f32( %x) { ; RV32-LABEL: lround_nxv2f32: @@ -64,7 +63,6 @@ define @lround_nxv2f32( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f32( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f32() define @lround_nxv4f32( %x) { ; RV32-LABEL: lround_nxv4f32: @@ -94,7 +92,6 @@ define @lround_nxv4f32( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f32( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f32() define @lround_nxv8f32( %x) { ; RV32-LABEL: lround_nxv8f32: @@ -124,7 +121,6 @@ define @lround_nxv8f32( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f32( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f32() define @lround_nxv16f32( %x) { ; RV32-LABEL: lround_nxv16f32: @@ -155,7 +151,6 @@ define @lround_nxv16f32( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16f32( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16f32() define @lround_nxv1f64( %x) { ; RV32-LABEL: lround_nxv1f64: @@ -186,7 +181,6 @@ define @lround_nxv1f64( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f64( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f64() define @lround_nxv2f64( %x) { ; RV32-LABEL: lround_nxv2f64: @@ -217,7 +211,6 @@ define @lround_nxv2f64( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f64( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f64() define @lround_nxv4f64( %x) { ; RV32-LABEL: lround_nxv4f64: @@ -248,7 +241,6 @@ define @lround_nxv4f64( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f64( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f64() define @lround_nxv8f64( %x) { ; RV32-LABEL: lround_nxv8f64: @@ -279,7 +271,6 @@ define @lround_nxv8f64( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f64( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f64() define @lround_nxv1f16( %x) { ; RV32-LABEL: lround_nxv1f16: @@ -314,7 +305,6 @@ define @lround_nxv1f16( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f16( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f16() define 
@lround_nxv2f16( %x) { ; RV32-LABEL: lround_nxv2f16: @@ -349,7 +339,6 @@ define @lround_nxv2f16( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f16( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f16() define @lround_nxv4f16( %x) { ; RV32-LABEL: lround_nxv4f16: @@ -384,7 +373,6 @@ define @lround_nxv4f16( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f16( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f16() define @lround_nxv8f16( %x) { ; RV32-LABEL: lround_nxv8f16: @@ -419,7 +407,6 @@ define @lround_nxv8f16( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f16( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f16() define @lround_nxv16f16( %x) { ; RV32-LABEL: lround_nxv16f16: @@ -456,7 +443,6 @@ define @lround_nxv16f16( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16f16( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16f16() define @lround_nxv1bf16( %x) { ; RV32-LABEL: lround_nxv1bf16: @@ -491,7 +477,6 @@ define @lround_nxv1bf16( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1bf16( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1bf16() define @lround_nxv2bf16( %x) { ; RV32-LABEL: lround_nxv2bf16: @@ -526,7 +511,6 @@ define @lround_nxv2bf16( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2bf16( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2bf16() define @lround_nxv4bf16( %x) { ; RV32-LABEL: lround_nxv4bf16: @@ -561,7 +545,6 @@ define @lround_nxv4bf16( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4bf16( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4bf16() define @lround_nxv8bf16( %x) { ; RV32-LABEL: lround_nxv8bf16: @@ -596,7 +579,6 @@ define @lround_nxv8bf16( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8bf16( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8bf16() define @lround_nxv16bf16( %x) { ; RV32-LABEL: lround_nxv16bf16: @@ -633,7 +615,6 @@ define @lround_nxv16bf16( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16bf16( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16bf16() define @lround_nxv32bf16( %x) { ; RV32-LABEL: lround_nxv32bf16: @@ -756,4 +737,3 @@ define 
@lround_nxv32bf16( %x) { %a = call @llvm.lround.nxv32iXLen.nxv32bf16( %x) ret %a } -declare @llvm.lround.nxv32iXLen.nxv32bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll index 40e6567ac802d..05b261b95e30c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.vp.and.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.and.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.and.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.and.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.and.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: 
and_v16i1: ; CHECK: # %bb.0: @@ -62,8 +52,6 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare @llvm.vp.and.nxv1i1(, , , i32) - define @and_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv1i1: ; CHECK: # %bb.0: @@ -74,8 +62,6 @@ define @and_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv2i1(, , , i32) - define @and_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv2i1: ; CHECK: # %bb.0: @@ -86,8 +72,6 @@ define @and_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv4i1(, , , i32) - define @and_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv4i1: ; CHECK: # %bb.0: @@ -98,8 +82,6 @@ define @and_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv8i1(, , , i32) - define @and_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv8i1: ; CHECK: # %bb.0: @@ -110,8 +92,6 @@ define @and_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv16i1(, , , i32) - define @and_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv16i1: ; CHECK: # %bb.0: @@ -122,8 +102,6 @@ define @and_nxv16i1( %b, ret %v } -declare @llvm.vp.and.nxv32i1(, , , i32) - define @and_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv32i1: ; CHECK: # %bb.0: @@ -134,8 +112,6 @@ define @and_nxv32i1( %b, ret %v } -declare @llvm.vp.and.nxv64i1(, , , i32) - define @and_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv64i1: ; CHECK: # %bb.0: @@ -146,8 +122,6 @@ define @and_nxv64i1( %b, ret %v } -declare <1 x i1> @llvm.vp.or.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: @@ -158,8 +132,6 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.or.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: 
or_v2i1: ; CHECK: # %bb.0: @@ -170,8 +142,6 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.or.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: @@ -182,8 +152,6 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.or.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: @@ -194,8 +162,6 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.or.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: @@ -206,8 +172,6 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext ret <16 x i1> %v } -declare @llvm.vp.or.nxv1i1(, , , i32) - define @or_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv1i1: ; CHECK: # %bb.0: @@ -218,8 +182,6 @@ define @or_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv2i1(, , , i32) - define @or_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv2i1: ; CHECK: # %bb.0: @@ -230,8 +192,6 @@ define @or_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv4i1(, , , i32) - define @or_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv4i1: ; CHECK: # %bb.0: @@ -242,8 +202,6 @@ define @or_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv8i1(, , , i32) - define @or_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv8i1: ; CHECK: # %bb.0: @@ -254,8 +212,6 @@ define @or_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv16i1(, , , i32) - define @or_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv16i1: ; 
CHECK: # %bb.0: @@ -266,8 +222,6 @@ define @or_nxv16i1( %b, ret %v } -declare @llvm.vp.or.nxv32i1(, , , i32) - define @or_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv32i1: ; CHECK: # %bb.0: @@ -278,8 +232,6 @@ define @or_nxv32i1( %b, ret %v } -declare @llvm.vp.or.nxv64i1(, , , i32) - define @or_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv64i1: ; CHECK: # %bb.0: @@ -290,8 +242,6 @@ define @or_nxv64i1( %b, ret %v } -declare <1 x i1> @llvm.vp.xor.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: @@ -302,8 +252,6 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.xor.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: @@ -314,8 +262,6 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.xor.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: @@ -326,8 +272,6 @@ define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.xor.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: @@ -338,8 +282,6 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.xor.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: @@ -350,8 +292,6 @@ define <16 x i1> @xor_v16i1(<16 x 
i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare @llvm.vp.xor.nxv1i1(, , , i32) - define @xor_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv1i1: ; CHECK: # %bb.0: @@ -362,8 +302,6 @@ define @xor_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv2i1(, , , i32) - define @xor_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv2i1: ; CHECK: # %bb.0: @@ -374,8 +312,6 @@ define @xor_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv4i1(, , , i32) - define @xor_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv4i1: ; CHECK: # %bb.0: @@ -386,8 +322,6 @@ define @xor_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv8i1(, , , i32) - define @xor_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv8i1: ; CHECK: # %bb.0: @@ -398,8 +332,6 @@ define @xor_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv16i1(, , , i32) - define @xor_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv16i1: ; CHECK: # %bb.0: @@ -410,8 +342,6 @@ define @xor_nxv16i1( %b, ret %v } -declare @llvm.vp.xor.nxv32i1(, , , i32) - define @xor_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv32i1: ; CHECK: # %bb.0: @@ -422,8 +352,6 @@ define @xor_nxv32i1( %b, ret %v } -declare @llvm.vp.xor.nxv64i1(, , , i32) - define @xor_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll index 5c0a6ac82d8cf..62cee4057a56d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll @@ -13,7 +13,6 @@ define @masked_load_nxv1bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv1bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1bf16(ptr, i32, , ) define @masked_load_nxv1f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f16: @@ -24,7 +23,6 @@ define @masked_load_nxv1f16(ptr %a, %mask) %load = call 
@llvm.masked.load.nxv1f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f16(ptr, i32, , ) define @masked_load_nxv1f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f32: @@ -35,7 +33,6 @@ define @masked_load_nxv1f32(ptr %a, %mask %load = call @llvm.masked.load.nxv1f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f32(ptr, i32, , ) define @masked_load_nxv1f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f64: @@ -46,7 +43,6 @@ define @masked_load_nxv1f64(ptr %a, %mas %load = call @llvm.masked.load.nxv1f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f64(ptr, i32, , ) define @masked_load_nxv2bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2bf16: @@ -57,7 +53,6 @@ define @masked_load_nxv2bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv2bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2bf16(ptr, i32, , ) define @masked_load_nxv2f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f16: @@ -68,7 +63,6 @@ define @masked_load_nxv2f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv2f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f16(ptr, i32, , ) define @masked_load_nxv2f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f32: @@ -79,7 +73,6 @@ define @masked_load_nxv2f32(ptr %a, %mask %load = call @llvm.masked.load.nxv2f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f32(ptr, i32, , ) define @masked_load_nxv2f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f64: @@ -90,7 +83,6 @@ define @masked_load_nxv2f64(ptr %a, %mas %load = call @llvm.masked.load.nxv2f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f64(ptr, i32, , ) define @masked_load_nxv4bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4bf16: @@ -101,7 +93,6 @@ define @masked_load_nxv4bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv4bf16(ptr %a, 
i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4bf16(ptr, i32, , ) define @masked_load_nxv4f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f16: @@ -112,7 +103,6 @@ define @masked_load_nxv4f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv4f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f16(ptr, i32, , ) define @masked_load_nxv4f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f32: @@ -123,7 +113,6 @@ define @masked_load_nxv4f32(ptr %a, %mask %load = call @llvm.masked.load.nxv4f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f32(ptr, i32, , ) define @masked_load_nxv4f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f64: @@ -134,7 +123,6 @@ define @masked_load_nxv4f64(ptr %a, %mas %load = call @llvm.masked.load.nxv4f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f64(ptr, i32, , ) define @masked_load_nxv8bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8bf16: @@ -145,7 +133,6 @@ define @masked_load_nxv8bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv8bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8bf16(ptr, i32, , ) define @masked_load_nxv8f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f16: @@ -156,7 +143,6 @@ define @masked_load_nxv8f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv8f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8f16(ptr, i32, , ) define @masked_load_nxv8f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f32: @@ -167,7 +153,6 @@ define @masked_load_nxv8f32(ptr %a, %mask %load = call @llvm.masked.load.nxv8f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8f32(ptr, i32, , ) define @masked_load_nxv8f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f64: @@ -178,7 +163,6 @@ define @masked_load_nxv8f64(ptr %a, %mas %load = call @llvm.masked.load.nxv8f64(ptr %a, i32 8, %mask, poison) 
ret %load } -declare @llvm.masked.load.nxv8f64(ptr, i32, , ) define @masked_load_nxv16bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16bf16: @@ -189,7 +173,6 @@ define @masked_load_nxv16bf16(ptr %a, %load = call @llvm.masked.load.nxv16bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16bf16(ptr, i32, , ) define @masked_load_nxv16f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f16: @@ -200,7 +183,6 @@ define @masked_load_nxv16f16(ptr %a, %ma %load = call @llvm.masked.load.nxv16f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16f16(ptr, i32, , ) define @masked_load_nxv16f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f32: @@ -211,7 +193,6 @@ define @masked_load_nxv16f32(ptr %a, %m %load = call @llvm.masked.load.nxv16f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16f32(ptr, i32, , ) define @masked_load_nxv32bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32bf16: @@ -222,7 +203,6 @@ define @masked_load_nxv32bf16(ptr %a, %load = call @llvm.masked.load.nxv32bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32bf16(ptr, i32, , ) define @masked_load_nxv32f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32f16: @@ -233,4 +213,3 @@ define @masked_load_nxv32f16(ptr %a, %ma %load = call @llvm.masked.load.nxv32f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32f16(ptr, i32, , ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll index 41cc500f4a610..5fcf10d446a33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll @@ -11,7 +11,6 @@ define @masked_load_nxv1i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i64(ptr, i32, , ) define @masked_load_nxv2i64(ptr %a, %mask) 
nounwind { ; CHECK-LABEL: masked_load_nxv2i64: @@ -22,7 +21,6 @@ define @masked_load_nxv2i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i64(ptr, i32, , ) define @masked_load_nxv4i64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i64: @@ -33,7 +31,6 @@ define @masked_load_nxv4i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i64(ptr, i32, , ) define @masked_load_nxv8i64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i64: @@ -44,4 +41,3 @@ define @masked_load_nxv8i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i64(ptr, i32, , ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll index 94794a74b2ced..40b906a481daa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll @@ -59,7 +59,6 @@ define @masked_load_nxv1i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i16(ptr, i32, , ) define @masked_load_nxv1i32(ptr %a, %mask) nounwind { ; V-LABEL: masked_load_nxv1i32: @@ -78,7 +77,6 @@ define @masked_load_nxv1i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i32(ptr, i32, , ) define @masked_load_nxv2i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i8: @@ -89,7 +87,6 @@ define @masked_load_nxv2i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv2i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i8(ptr, i32, , ) define @masked_load_nxv2i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i16: @@ -100,7 +97,6 @@ define @masked_load_nxv2i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i16(ptr 
%a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i16(ptr, i32, , ) define @masked_load_nxv2i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i32: @@ -111,7 +107,6 @@ define @masked_load_nxv2i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i32(ptr, i32, , ) define @masked_load_nxv4i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i8: @@ -122,7 +117,6 @@ define @masked_load_nxv4i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv4i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i8(ptr, i32, , ) define @masked_load_nxv4i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i16: @@ -133,7 +127,6 @@ define @masked_load_nxv4i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i16(ptr, i32, , ) define @masked_load_nxv4i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i32: @@ -144,7 +137,6 @@ define @masked_load_nxv4i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i32(ptr, i32, , ) define @masked_load_nxv8i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i8: @@ -155,7 +147,6 @@ define @masked_load_nxv8i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv8i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i8(ptr, i32, , ) define @masked_load_nxv8i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i16: @@ -166,7 +157,6 @@ define @masked_load_nxv8i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i16(ptr, i32, , ) define @masked_load_nxv8i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i32: @@ -177,7 +167,6 @@ define @masked_load_nxv8i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i32(ptr %a, i32 4, %mask, 
poison) ret %load } -declare @llvm.masked.load.nxv8i32(ptr, i32, , ) define @masked_load_nxv16i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i8: @@ -188,7 +177,6 @@ define @masked_load_nxv16i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv16i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i8(ptr, i32, , ) define @masked_load_nxv16i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i16: @@ -199,7 +187,6 @@ define @masked_load_nxv16i16(ptr %a, %mas %load = call @llvm.masked.load.nxv16i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i16(ptr, i32, , ) define @masked_load_nxv16i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i32: @@ -210,7 +197,6 @@ define @masked_load_nxv16i32(ptr %a, %mas %load = call @llvm.masked.load.nxv16i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i32(ptr, i32, , ) define @masked_load_nxv32i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i8: @@ -221,7 +207,6 @@ define @masked_load_nxv32i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv32i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32i8(ptr, i32, , ) define @masked_load_nxv32i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i16: @@ -232,7 +217,6 @@ define @masked_load_nxv32i16(ptr %a, %mas %load = call @llvm.masked.load.nxv32i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32i16(ptr, i32, , ) define @masked_load_nxv64i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv64i8: @@ -243,7 +227,6 @@ define @masked_load_nxv64i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv64i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv64i8(ptr, i32, , ) define @masked_load_zero_mask(ptr %a) nounwind { ; CHECK-LABEL: masked_load_zero_mask: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll index 
586af50266f94..0b874fff5c526 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll @@ -13,7 +13,6 @@ define void @masked_store_nxv1bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv1bf16.p0(, ptr, i32, ) define void @masked_store_nxv1f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f16: @@ -24,7 +23,6 @@ define void @masked_store_nxv1f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv1f16.p0(, ptr, i32, ) define void @masked_store_nxv1f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f32: @@ -35,7 +33,6 @@ define void @masked_store_nxv1f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv1f32.p0(, ptr, i32, ) define void @masked_store_nxv1f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f64: @@ -46,7 +43,6 @@ define void @masked_store_nxv1f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv1f64.p0(, ptr, i32, ) define void @masked_store_nxv2bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2bf16: @@ -57,7 +53,6 @@ define void @masked_store_nxv2bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv2bf16.p0(, ptr, i32, ) define void @masked_store_nxv2f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f16: @@ -68,7 +63,6 @@ define void @masked_store_nxv2f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv2f16.p0(, ptr, i32, ) define void @masked_store_nxv2f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f32: @@ -79,7 +73,6 @@ define void @masked_store_nxv2f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv2f32.p0(, ptr, i32, ) define void @masked_store_nxv2f64( %val, ptr %a, %mask) nounwind { ; 
CHECK-LABEL: masked_store_nxv2f64: @@ -90,7 +83,6 @@ define void @masked_store_nxv2f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv2f64.p0(, ptr, i32, ) define void @masked_store_nxv4bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4bf16: @@ -101,7 +93,6 @@ define void @masked_store_nxv4bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv4bf16.p0(, ptr, i32, ) define void @masked_store_nxv4f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f16: @@ -112,7 +103,6 @@ define void @masked_store_nxv4f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv4f16.p0(, ptr, i32, ) define void @masked_store_nxv4f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f32: @@ -123,7 +113,6 @@ define void @masked_store_nxv4f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv4f32.p0(, ptr, i32, ) define void @masked_store_nxv4f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f64: @@ -134,7 +123,6 @@ define void @masked_store_nxv4f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv4f64.p0(, ptr, i32, ) define void @masked_store_nxv8bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8bf16: @@ -145,7 +133,6 @@ define void @masked_store_nxv8bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv8bf16.p0(, ptr, i32, ) define void @masked_store_nxv8f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f16: @@ -156,7 +143,6 @@ define void @masked_store_nxv8f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv8f16.p0(, ptr, i32, ) define void @masked_store_nxv8f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f32: @@ -167,7 +153,6 @@ define void @masked_store_nxv8f32( %val, ptr %a, 
%val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv8f32.p0(, ptr, i32, ) define void @masked_store_nxv8f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f64: @@ -178,7 +163,6 @@ define void @masked_store_nxv8f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv8f64.p0(, ptr, i32, ) define void @masked_store_nxv16bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16bf16: @@ -189,7 +173,6 @@ define void @masked_store_nxv16bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv16bf16.p0(, ptr, i32, ) define void @masked_store_nxv16f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f16: @@ -200,7 +183,6 @@ define void @masked_store_nxv16f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv16f16.p0(, ptr, i32, ) define void @masked_store_nxv16f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f32: @@ -211,7 +193,6 @@ define void @masked_store_nxv16f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv16f32.p0(, ptr, i32, ) define void @masked_store_nxv32bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32bf16: @@ -222,7 +203,6 @@ define void @masked_store_nxv32bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv32bf16.p0(, ptr, i32, ) define void @masked_store_nxv32f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32f16: @@ -233,4 +213,3 @@ define void @masked_store_nxv32f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv32f16.p0(, ptr, i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll index 602ee6105af5f..5936bc73c1a94 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll @@ -11,7 +11,6 @@ define void @masked_store_nxv1i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v1i64.p0(, ptr, i32, ) define void @masked_store_nxv2i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i64: @@ -22,7 +21,6 @@ define void @masked_store_nxv2i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v2i64.p0(, ptr, i32, ) define void @masked_store_nxv4i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i64: @@ -33,7 +31,6 @@ define void @masked_store_nxv4i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v4i64.p0(, ptr, i32, ) define void @masked_store_nxv8i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i64: @@ -44,4 +41,3 @@ define void @masked_store_nxv8i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v8i64.p0(, ptr, i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll index 92893a7dd463a..dc83cdc695b54 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll @@ -21,7 +21,6 @@ define void @masked_store_nxv1i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v1i8.p0(, ptr, i32, ) define void @masked_store_nxv1i16( %val, ptr %a, %mask) nounwind { ; V-LABEL: masked_store_nxv1i16: @@ -40,7 +39,6 @@ define void @masked_store_nxv1i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v1i16.p0(, ptr, i32, ) define void @masked_store_nxv1i32( %val, ptr %a, %mask) nounwind { ; V-LABEL: masked_store_nxv1i32: @@ -59,7 +57,6 @@ define void @masked_store_nxv1i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v1i32.p0(, ptr, i32, ) define void @masked_store_nxv2i8( %val, 
ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i8: @@ -70,7 +67,6 @@ define void @masked_store_nxv2i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v2i8.p0(, ptr, i32, ) define void @masked_store_nxv2i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i16: @@ -81,7 +77,6 @@ define void @masked_store_nxv2i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v2i16.p0(, ptr, i32, ) define void @masked_store_nxv2i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i32: @@ -92,7 +87,6 @@ define void @masked_store_nxv2i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v2i32.p0(, ptr, i32, ) define void @masked_store_nxv4i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i8: @@ -103,7 +97,6 @@ define void @masked_store_nxv4i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v4i8.p0(, ptr, i32, ) define void @masked_store_nxv4i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i16: @@ -114,7 +107,6 @@ define void @masked_store_nxv4i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v4i16.p0(, ptr, i32, ) define void @masked_store_nxv4i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i32: @@ -125,7 +117,6 @@ define void @masked_store_nxv4i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v4i32.p0(, ptr, i32, ) define void @masked_store_nxv8i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i8: @@ -136,7 +127,6 @@ define void @masked_store_nxv8i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v8i8.p0(, ptr, i32, ) define void @masked_store_nxv8i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i16: @@ -147,7 +137,6 @@ define void @masked_store_nxv8i16( %val, ptr %a, %val, ptr 
%a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v8i16.p0(, ptr, i32, ) define void @masked_store_nxv8i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i32: @@ -158,7 +147,6 @@ define void @masked_store_nxv8i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v8i32.p0(, ptr, i32, ) define void @masked_store_nxv16i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i8: @@ -169,7 +157,6 @@ define void @masked_store_nxv16i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v16i8.p0(, ptr, i32, ) define void @masked_store_nxv16i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i16: @@ -180,7 +167,6 @@ define void @masked_store_nxv16i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v16i16.p0(, ptr, i32, ) define void @masked_store_nxv16i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i32: @@ -191,7 +177,6 @@ define void @masked_store_nxv16i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v16i32.p0(, ptr, i32, ) define void @masked_store_nxv32i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i8: @@ -202,7 +187,6 @@ define void @masked_store_nxv32i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v32i8.p0(, ptr, i32, ) define void @masked_store_nxv32i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i16: @@ -213,7 +197,6 @@ define void @masked_store_nxv32i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v32i16.p0(, ptr, i32, ) define void @masked_store_nxv64i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv64i8: @@ -224,7 +207,6 @@ define void @masked_store_nxv64i8( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v64i8.p0(, ptr, i32, ) define void 
@masked_store_zero_mask( %val, ptr %a) nounwind { ; CHECK-LABEL: masked_store_zero_mask: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll index 42d0bc57c6b5c..08e25246a6092 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_nxv1i64_nxv1i64(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, %1, iXLen %2, ptr %3) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -63,14 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -88,14 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(ptr %0, %1, %2, iXLen %3) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -108,13 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -132,14 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -158,14 +113,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -183,14 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -208,13 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -232,13 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -255,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: @@ -279,14 +198,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -304,14 +215,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -329,14 +232,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -354,14 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -379,14 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -404,14 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -430,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -479,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , 
- , - , - , - iXLen, - iXLen); - define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,15 +353,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -531,15 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -558,15 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -585,15 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -612,13 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -661,13 +460,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -685,13 +477,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -709,13 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -733,13 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -758,13 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -782,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +564,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -852,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -874,14 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -899,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -924,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -949,12 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -971,12 +681,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1016,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1039,12 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1062,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1155,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1181,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1207,14 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1233,14 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1259,11 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1279,12 +903,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -1303,12 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -1327,18 +939,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16( - , - , - , - , - iXLen); - -declare @llvm.riscv.vmfeq.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1361,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1397,13 +990,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1434,12 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll index 2e8b6c5fcca22..a1be60e689f20 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define 
@intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - 
, - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) 
define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define 
@intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ 
-808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define 
@intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll index ca9b6245a8570..c11cc4f2b498c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # 
%bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define 
@intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll index df9e84c66cefa..e2b2d7b4a6a34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh\ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - 
iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll index 0e4d709836abd..d888e23d6c59c 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -2,14 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry @@ -54,7 +46,6 @@ entry: ret %a } - ; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64: diff --git a/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll b/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll index e020fe1a0aa1a..3203ec8614153 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll @@ -14,4 +14,3 @@ entry: ret void } -declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1 diff --git a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll index 2553f563b7d0f..90a8c68b3e96d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll @@ -11,7 +11,6 @@ ; ---------------------------------------------------------------------- ; Fully unaligned cases - define void @unaligned_memcpy1(ptr nocapture %dest, ptr %src) nounwind { ; RV32-BOTH-LABEL: unaligned_memcpy1: ; RV32-BOTH: # %bb.0: # %entry @@ -645,7 +644,6 @@ entry: ret void } - ; ---------------------------------------------------------------------- ; Fully aligned cases @@ -1031,7 +1029,6 @@ entry: ; ------------------------------------------------------------------------ ; A few 
partially aligned cases - define void @memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-LABEL: memcpy16_align4: ; RV32: # %bb.0: # %entry @@ -1112,6 +1109,3 @@ entry: ret i32 0 } - -declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll index 8190a82d7035b..d5f8cc3b6ee93 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -2,13 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \ ; RUN: | FileCheck %s -check-prefix=RV64IV -declare @llvm.riscv.vmacc.nxv64i8.nxv64i8( - , - , - , - i64, - i64); - define @callee( %arg0, %arg1, %arg2) { ; RV64IV-LABEL: callee: ; RV64IV: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll index 2c11bd1ff5dc5..41c744e9347a0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll @@ -9,9 +9,6 @@ ; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind -declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind - ; ///////////////////////////////////////////////////////////////////////////// define void @memset_1(ptr %a, i8 %value) nounwind { @@ -620,7 +617,6 @@ define void @aligned_memset_zero_8(ptr %a) nounwind { ret void } - define void @aligned_memset_zero_16(ptr %a) nounwind { ; RV32-BOTH-LABEL: aligned_memset_zero_16: ; RV32-BOTH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll index 65ac424c2359a..ae3b9db74080c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,RV64 -declare @llvm.masked.gather.nxv1i8.nxv1p0(, i32, , ) - define @mgather_nxv1i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i8: ; RV32: # %bb.0: @@ -32,8 +30,6 @@ define @mgather_nxv1i8( %ptrs, %v } -declare @llvm.masked.gather.nxv2i8.nxv2p0(, i32, , ) - define @mgather_nxv2i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8: ; RV32: # %bb.0: @@ -180,8 +176,6 @@ define @mgather_nxv2i8_zextload_nxv2i64( %p ret %ev } -declare @llvm.masked.gather.nxv4i8.nxv4p0(, i32, , ) - define @mgather_nxv4i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i8: ; RV32: # %bb.0: @@ -234,8 +228,6 @@ define @mgather_falsemask_nxv4i8( %ptrs, %v } -declare @llvm.masked.gather.nxv8i8.nxv8p0(, i32, , ) - define @mgather_nxv8i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i8: ; RV32: # %bb.0: @@ -277,8 +269,6 @@ define @mgather_baseidx_nxv8i8(ptr %base, %i ret %v } -declare @llvm.masked.gather.nxv1i16.nxv1p0(, i32, , ) - define @mgather_nxv1i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i16: ; RV32: # %bb.0: @@ -297,8 +287,6 @@ define @mgather_nxv1i16( %ptrs, %v } -declare @llvm.masked.gather.nxv2i16.nxv2p0(, i32, , ) - define @mgather_nxv2i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16: ; RV32: # %bb.0: @@ -403,8 +391,6 @@ define @mgather_nxv2i16_zextload_nxv2i64( % ret %ev } -declare @llvm.masked.gather.nxv4i16.nxv4p0(, i32, , ) - define @mgather_nxv4i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i16: ; RV32: # %bb.0: @@ -457,8 +443,6 @@ define @mgather_falsemask_nxv4i16( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i16.nxv8p0(, i32, , ) - define @mgather_nxv8i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i16: ; RV32: # %bb.0: @@ -566,8 +550,6 @@ define @mgather_baseidx_nxv8i16(ptr %base, ret %v } -declare @llvm.masked.gather.nxv1i32.nxv1p0(, i32, , ) - define 
@mgather_nxv1i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i32: ; RV32: # %bb.0: @@ -586,8 +568,6 @@ define @mgather_nxv1i32( %ptrs, %v } -declare @llvm.masked.gather.nxv2i32.nxv2p0(, i32, , ) - define @mgather_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i32: ; RV32: # %bb.0: @@ -650,8 +630,6 @@ define @mgather_nxv2i32_zextload_nxv2i64( % ret %ev } -declare @llvm.masked.gather.nxv4i32.nxv4p0(, i32, , ) - define @mgather_nxv4i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i32: ; RV32: # %bb.0: @@ -703,8 +681,6 @@ define @mgather_falsemask_nxv4i32( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i32.nxv8p0(, i32, , ) - define @mgather_nxv8i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i32: ; RV32: # %bb.0: @@ -877,8 +853,6 @@ define @mgather_baseidx_nxv8i32(ptr %base, ret %v } -declare @llvm.masked.gather.nxv1i64.nxv1p0(, i32, , ) - define @mgather_nxv1i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i64: ; RV32: # %bb.0: @@ -897,8 +871,6 @@ define @mgather_nxv1i64( %ptrs, %v } -declare @llvm.masked.gather.nxv2i64.nxv2p0(, i32, , ) - define @mgather_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i64: ; RV32: # %bb.0: @@ -917,8 +889,6 @@ define @mgather_nxv2i64( %ptrs, %v } -declare @llvm.masked.gather.nxv4i64.nxv4p0(, i32, , ) - define @mgather_nxv4i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i64: ; RV32: # %bb.0: @@ -964,8 +934,6 @@ define @mgather_falsemask_nxv4i64( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i64.nxv8p0(, i32, , ) - define @mgather_nxv8i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i64: ; RV32: # %bb.0: @@ -1211,11 +1179,6 @@ define @mgather_baseidx_nxv8i64(ptr %base, ret %v } -declare @llvm.masked.gather.nxv16i64.nxv16p0(, i32, , ) - -declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64 %idx) -declare @llvm.vector.insert.nxv8p0.nxv16p0(, , i64 %idx) - define void @mgather_nxv16i64( %ptrs0, %ptrs1, %m, %passthru0, %passthru1, ptr %out) { ; RV32-LABEL: 
mgather_nxv16i64: ; RV32: # %bb.0: @@ -1262,8 +1225,6 @@ define void @mgather_nxv16i64( %ptrs0, %ptr ret void } -declare @llvm.masked.gather.nxv1bf16.nxv1p0(, i32, , ) - define @mgather_nxv1bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1bf16: ; RV32: # %bb.0: @@ -1282,8 +1243,6 @@ define @mgather_nxv1bf16( %ptrs, %v } -declare @llvm.masked.gather.nxv2bf16.nxv2p0(, i32, , ) - define @mgather_nxv2bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2bf16: ; RV32: # %bb.0: @@ -1302,8 +1261,6 @@ define @mgather_nxv2bf16( %ptrs, %v } -declare @llvm.masked.gather.nxv4bf16.nxv4p0(, i32, , ) - define @mgather_nxv4bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4bf16: ; RV32: # %bb.0: @@ -1356,8 +1313,6 @@ define @mgather_falsemask_nxv4bf16( %ptr ret %v } -declare @llvm.masked.gather.nxv8bf16.nxv8p0(, i32, , ) - define @mgather_nxv8bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8bf16: ; RV32: # %bb.0: @@ -1465,8 +1420,6 @@ define @mgather_baseidx_nxv8bf16(ptr %base, %v } -declare @llvm.masked.gather.nxv1f16.nxv1p0(, i32, , ) - define @mgather_nxv1f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f16: ; RV32: # %bb.0: @@ -1485,8 +1438,6 @@ define @mgather_nxv1f16( %ptrs, %v } -declare @llvm.masked.gather.nxv2f16.nxv2p0(, i32, , ) - define @mgather_nxv2f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f16: ; RV32: # %bb.0: @@ -1505,8 +1456,6 @@ define @mgather_nxv2f16( %ptrs, %v } -declare @llvm.masked.gather.nxv4f16.nxv4p0(, i32, , ) - define @mgather_nxv4f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f16: ; RV32: # %bb.0: @@ -1559,8 +1508,6 @@ define @mgather_falsemask_nxv4f16( %ptrs, ret %v } -declare @llvm.masked.gather.nxv8f16.nxv8p0(, i32, , ) - define @mgather_nxv8f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f16: ; RV32: # %bb.0: @@ -1668,8 +1615,6 @@ define @mgather_baseidx_nxv8f16(ptr %base, %v } -declare @llvm.masked.gather.nxv1f32.nxv1p0(, i32, , ) - define @mgather_nxv1f32( %ptrs, %m, %passthru) { ; RV32-LABEL: 
mgather_nxv1f32: ; RV32: # %bb.0: @@ -1688,8 +1633,6 @@ define @mgather_nxv1f32( %ptrs, %v } -declare @llvm.masked.gather.nxv2f32.nxv2p0(, i32, , ) - define @mgather_nxv2f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f32: ; RV32: # %bb.0: @@ -1708,8 +1651,6 @@ define @mgather_nxv2f32( %ptrs, %v } -declare @llvm.masked.gather.nxv4f32.nxv4p0(, i32, , ) - define @mgather_nxv4f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f32: ; RV32: # %bb.0: @@ -1761,8 +1702,6 @@ define @mgather_falsemask_nxv4f32( %ptrs, ret %v } -declare @llvm.masked.gather.nxv8f32.nxv8p0(, i32, , ) - define @mgather_nxv8f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f32: ; RV32: # %bb.0: @@ -1935,8 +1874,6 @@ define @mgather_baseidx_nxv8f32(ptr %base, %v } -declare @llvm.masked.gather.nxv1f64.nxv1p0(, i32, , ) - define @mgather_nxv1f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f64: ; RV32: # %bb.0: @@ -1955,8 +1892,6 @@ define @mgather_nxv1f64( %ptrs, %v } -declare @llvm.masked.gather.nxv2f64.nxv2p0(, i32, , ) - define @mgather_nxv2f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f64: ; RV32: # %bb.0: @@ -1975,8 +1910,6 @@ define @mgather_nxv2f64( %ptrs, %v } -declare @llvm.masked.gather.nxv4f64.nxv4p0(, i32, , ) - define @mgather_nxv4f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f64: ; RV32: # %bb.0: @@ -2022,8 +1955,6 @@ define @mgather_falsemask_nxv4f64( %ptrs ret %v } -declare @llvm.masked.gather.nxv8f64.nxv8p0(, i32, , ) - define @mgather_nxv8f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f64: ; RV32: # %bb.0: @@ -2269,8 +2200,6 @@ define @mgather_baseidx_nxv8f64(ptr %base, %v } -declare @llvm.masked.gather.nxv16i8.nxv16p0(, i32, , ) - define @mgather_baseidx_nxv16i8(ptr %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv16i8: ; RV32: # %bb.0: @@ -2302,8 +2231,6 @@ define @mgather_baseidx_nxv16i8(ptr %base, ret %v } -declare @llvm.masked.gather.nxv32i8.nxv32p0(, i32, , ) - define @mgather_baseidx_nxv32i8(ptr %base, %idxs, %m, 
%passthru) { ; RV32-LABEL: mgather_baseidx_nxv32i8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll index 489323b323110..0f3ebb9e625ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -4,24 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - -declare @llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - -declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @test_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { ; CHECK-LABEL: test_half_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll index ac26a014aaa64..00300cc09607d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll @@ -124,6 +124,3 @@ define void @stride_one_store(i64 %n, ptr %p) { ret void } -declare @llvm.stepvector.nxv1i64() -declare void @llvm.masked.scatter.nxv2i32.nxv2p0(, , i32, ) -declare void @llvm.masked.scatter.nxv1i64.nxv1p0(, , i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll index c961d1a9e32e4..5eb6553aaba79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,RV64 -declare void @llvm.masked.scatter.nxv1i8.nxv1p0(, , i32, ) - define void @mscatter_nxv1i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i8: ; RV32: # %bb.0: @@ -30,8 +28,6 @@ define void @mscatter_nxv1i8( %val, %ptrs, < ret void } -declare void 
@llvm.masked.scatter.nxv2i8.nxv2p0(, , i32, ) - define void @mscatter_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i8: ; RV32: # %bb.0: @@ -117,8 +113,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8( %val, , , i32, ) - define void @mscatter_nxv4i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i8: ; RV32: # %bb.0: @@ -159,8 +153,6 @@ define void @mscatter_falsemask_nxv4i8( %val, , , i32, ) - define void @mscatter_nxv8i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i8: ; RV32: # %bb.0: @@ -198,8 +190,6 @@ define void @mscatter_baseidx_nxv8i8( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i16: ; RV32: # %bb.0: @@ -216,8 +206,6 @@ define void @mscatter_nxv1i16( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i16.nxv2p0(, , i32, ) - define void @mscatter_nxv2i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i16: ; RV32: # %bb.0: @@ -276,8 +264,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16( %val, , , i32, ) - define void @mscatter_nxv4i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i16: ; RV32: # %bb.0: @@ -318,8 +304,6 @@ define void @mscatter_falsemask_nxv4i16( %val, , , i32, ) - define void @mscatter_nxv8i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i16: ; RV32: # %bb.0: @@ -418,8 +402,6 @@ define void @mscatter_baseidx_nxv8i16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i32: ; RV32: # %bb.0: @@ -436,8 +418,6 @@ define void @mscatter_nxv1i32( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i32.nxv2p0(, , i32, ) - define void @mscatter_nxv2i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i32: ; RV32: # %bb.0: @@ -473,8 +453,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32( %val, , , i32, ) - define void @mscatter_nxv4i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i32: ; RV32: # %bb.0: @@ -515,8 +493,6 @@ define void @mscatter_falsemask_nxv4i32( %val, , , i32, ) - define 
void @mscatter_nxv8i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i32: ; RV32: # %bb.0: @@ -675,8 +651,6 @@ define void @mscatter_baseidx_nxv8i32( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i64: ; RV32: # %bb.0: @@ -693,8 +667,6 @@ define void @mscatter_nxv1i64( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i64.nxv2p0(, , i32, ) - define void @mscatter_nxv2i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64: ; RV32: # %bb.0: @@ -711,8 +683,6 @@ define void @mscatter_nxv2i64( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv4i64.nxv4p0(, , i32, ) - define void @mscatter_nxv4i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i64: ; RV32: # %bb.0: @@ -753,8 +723,6 @@ define void @mscatter_falsemask_nxv4i64( %val, , , i32, ) - define void @mscatter_nxv8i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i64: ; RV32: # %bb.0: @@ -980,8 +948,6 @@ define void @mscatter_baseidx_nxv8i64( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1bf16: ; RV32: # %bb.0: @@ -998,8 +964,6 @@ define void @mscatter_nxv1bf16( %val, %p ret void } -declare void @llvm.masked.scatter.nxv2bf16.nxv2p0(, , i32, ) - define void @mscatter_nxv2bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2bf16: ; RV32: # %bb.0: @@ -1016,8 +980,6 @@ define void @mscatter_nxv2bf16( %val, %p ret void } -declare void @llvm.masked.scatter.nxv4bf16.nxv4p0(, , i32, ) - define void @mscatter_nxv4bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4bf16: ; RV32: # %bb.0: @@ -1058,8 +1020,6 @@ define void @mscatter_falsemask_nxv4bf16( %val, , , i32, ) - define void @mscatter_nxv8bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8bf16: ; RV32: # %bb.0: @@ -1158,8 +1118,6 @@ define void @mscatter_baseidx_nxv8bf16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f16: ; RV32: # %bb.0: @@ -1176,8 +1134,6 @@ 
define void @mscatter_nxv1f16( %val, %ptrs ret void } -declare void @llvm.masked.scatter.nxv2f16.nxv2p0(, , i32, ) - define void @mscatter_nxv2f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f16: ; RV32: # %bb.0: @@ -1194,8 +1150,6 @@ define void @mscatter_nxv2f16( %val, %ptrs ret void } -declare void @llvm.masked.scatter.nxv4f16.nxv4p0(, , i32, ) - define void @mscatter_nxv4f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f16: ; RV32: # %bb.0: @@ -1236,8 +1190,6 @@ define void @mscatter_falsemask_nxv4f16( %val, , , i32, ) - define void @mscatter_nxv8f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f16: ; RV32: # %bb.0: @@ -1336,8 +1288,6 @@ define void @mscatter_baseidx_nxv8f16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f32: ; RV32: # %bb.0: @@ -1354,8 +1304,6 @@ define void @mscatter_nxv1f32( %val, %ptr ret void } -declare void @llvm.masked.scatter.nxv2f32.nxv2p0(, , i32, ) - define void @mscatter_nxv2f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f32: ; RV32: # %bb.0: @@ -1372,8 +1320,6 @@ define void @mscatter_nxv2f32( %val, %ptr ret void } -declare void @llvm.masked.scatter.nxv4f32.nxv4p0(, , i32, ) - define void @mscatter_nxv4f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f32: ; RV32: # %bb.0: @@ -1414,8 +1360,6 @@ define void @mscatter_falsemask_nxv4f32( %val, , , i32, ) - define void @mscatter_nxv8f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f32: ; RV32: # %bb.0: @@ -1574,8 +1518,6 @@ define void @mscatter_baseidx_nxv8f32( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f64: ; RV32: # %bb.0: @@ -1592,8 +1534,6 @@ define void @mscatter_nxv1f64( %val, %pt ret void } -declare void @llvm.masked.scatter.nxv2f64.nxv2p0(, , i32, ) - define void @mscatter_nxv2f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f64: ; RV32: # %bb.0: @@ -1610,8 +1550,6 @@ define void @mscatter_nxv2f64( %val, %pt ret void } -declare void 
@llvm.masked.scatter.nxv4f64.nxv4p0(, , i32, ) - define void @mscatter_nxv4f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f64: ; RV32: # %bb.0: @@ -1652,8 +1590,6 @@ define void @mscatter_falsemask_nxv4f64( %val, , , i32, ) - define void @mscatter_nxv8f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f64: ; RV32: # %bb.0: @@ -1879,11 +1815,6 @@ define void @mscatter_baseidx_nxv8f64( %val, ptr %base, , , i32, ) - -declare @llvm.vector.insert.nxv8f64.nxv16f64(, , i64) -declare @llvm.vector.insert.nxv8p0.nxv16p0(, , i64) - define void @mscatter_nxv16f64( %val0, %val1, %ptrs0, %ptrs1, %m) { ; RV32-LABEL: mscatter_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll index ec83c4e87cebf..11305d9efbea1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll @@ -3,7 +3,6 @@ @__const.test.var_45 = private unnamed_addr constant [2 x i8] c"\D1S", align 1 @__const.test.var_101 = private unnamed_addr constant [2 x i8] c"\830", align 1 -; Function Attrs: nounwind vscale_range(2,1024) define dso_local void @test(ptr nocapture noundef %var_99) { ; CHECK-LABEL: test: ; CHECK: # %bb.0: # %entry @@ -36,16 +35,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv32i8.i64(, ptr nocapture, i64) #1 -declare @llvm.riscv.vmul.nxv32i8.i8.i64(, , i8, i64) #2 -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 -declare i8 @llvm.riscv.vmv.x.s.nxv32i8() #2 -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64, i64) #3 -declare @llvm.riscv.vmsleu.nxv32i8.i8.i64(, i8, i64) #2 -declare @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(, , , , i64) #2 -declare void @llvm.riscv.vse.nxv32i8.i64(, ptr nocapture, i64) #4 - -attributes #1 = { nofree nounwind memory(read) } -attributes #2 = { nofree nosync nounwind memory(none) } -attributes #3 = { nounwind } -attributes #4 = { nounwind memory(write) } diff --git 
a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index acc68491d5aee..c8cd78eda799d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -2089,49 +2089,3 @@ define @reverse_nxv12i64( %a) { ret %res } -declare @llvm.vector.reverse.nxv2i1() -declare @llvm.vector.reverse.nxv4i1() -declare @llvm.vector.reverse.nxv8i1() -declare @llvm.vector.reverse.nxv16i1() -declare @llvm.vector.reverse.nxv32i1() -declare @llvm.vector.reverse.nxv64i1() -declare @llvm.vector.reverse.nxv1i8() -declare @llvm.vector.reverse.nxv2i8() -declare @llvm.vector.reverse.nxv4i8() -declare @llvm.vector.reverse.nxv8i8() -declare @llvm.vector.reverse.nxv16i8() -declare @llvm.vector.reverse.nxv32i8() -declare @llvm.vector.reverse.nxv64i8() -declare @llvm.vector.reverse.nxv1i16() -declare @llvm.vector.reverse.nxv2i16() -declare @llvm.vector.reverse.nxv4i16() -declare @llvm.vector.reverse.nxv8i16() -declare @llvm.vector.reverse.nxv16i16() -declare @llvm.vector.reverse.nxv32i16() -declare @llvm.vector.reverse.nxv1i32() -declare @llvm.vector.reverse.nxv2i32() -declare @llvm.vector.reverse.nxv4i32() -declare @llvm.vector.reverse.nxv8i32() -declare @llvm.vector.reverse.nxv16i32() -declare @llvm.vector.reverse.nxv1i64() -declare @llvm.vector.reverse.nxv2i64() -declare @llvm.vector.reverse.nxv4i64() -declare @llvm.vector.reverse.nxv8i64() -declare @llvm.vector.reverse.nxv1f16() -declare @llvm.vector.reverse.nxv2f16() -declare @llvm.vector.reverse.nxv4f16() -declare @llvm.vector.reverse.nxv8f16() -declare @llvm.vector.reverse.nxv16f16() -declare @llvm.vector.reverse.nxv32f16() -declare @llvm.vector.reverse.nxv1f32() -declare @llvm.vector.reverse.nxv2f32() -declare @llvm.vector.reverse.nxv4f32() -declare @llvm.vector.reverse.nxv8f32() -declare @llvm.vector.reverse.nxv16f32() -declare @llvm.vector.reverse.nxv1f64() -declare 
@llvm.vector.reverse.nxv2f64() -declare @llvm.vector.reverse.nxv4f64() -declare @llvm.vector.reverse.nxv8f64() -declare @llvm.vector.reverse.nxv3i64() -declare @llvm.vector.reverse.nxv6i64() -declare @llvm.vector.reverse.nxv12i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll index 70c2691069276..1f4390c07be61 100644 --- a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll +++ b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll @@ -1,12 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @test_vloxei(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vloxei: ; CHECK: # %bb.0: # %entry @@ -68,7 +62,6 @@ entry: } ; Test use vp.zext to extend. -declare @llvm.vp.zext.nxvi64.nxv1i8(, , i32) define @test_vloxei4(ptr %ptr, %offset, %m, i32 zeroext %vl) { ; CHECK-LABEL: test_vloxei4: ; CHECK: # %bb.0: # %entry @@ -91,11 +84,6 @@ entry: } ; Test orignal extnened type is enough narrow. 
-declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - i64); define @test_vloxei5(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vloxei5: ; CHECK: # %bb.0: # %entry @@ -160,14 +148,6 @@ entry: ret %res } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @test_vloxei_mask(ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vloxei_mask: ; CHECK: # %bb.0: # %entry @@ -189,12 +169,6 @@ entry: ret %res } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @test_vluxei(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vluxei: ; CHECK: # %bb.0: # %entry @@ -215,14 +189,6 @@ entry: ret %res } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @test_vluxei_mask(ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vluxei_mask: ; CHECK: # %bb.0: # %entry @@ -244,12 +210,6 @@ entry: ret %res } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @test_vsoxei( %val, ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vsoxei: ; CHECK: # %bb.0: # %entry @@ -270,13 +230,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @test_vsoxei_mask( %val, ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vsoxei_mask: ; CHECK: # %bb.0: # %entry @@ -298,12 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @test_vsuxei( %val, ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vsuxei: ; CHECK: # %bb.0: # %entry @@ -324,13 +271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @test_vsuxei_mask( %val, ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vsuxei_mask: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll index 64e305f130dd7..67e7f7c7fbd42 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.nearbyint.nxv1bf16(, , i32) - define @vp_nearbyint_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_nearbyint_nxv1bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv2bf16(, , i32) - define @vp_nearbyint_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_nearbyint_nxv2bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv4bf16(, , i32) - define @vp_nearbyint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_nearbyint_nxv4bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv8bf16(, , i32) - define @vp_nearbyint_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_nearbyint_nxv8bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv16bf16(, , i32) - define @vp_nearbyint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_nearbyint_nxv16bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv32bf16(, , i32) - define @vp_nearbyint_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_nearbyint_nxv32bf16_unmasked( @llvm.vp.nearbyint.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.nearbyint.nxv1f16(, , i32) define @vp_nearbyint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv1f16: @@ -490,8 +477,6 @@ define @vp_nearbyint_nxv1f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv2f16(, , i32) - define @vp_nearbyint_nxv2f16( %va, %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vp_nearbyint_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_nearbyint_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv4f16(, , i32) - define @vp_nearbyint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_nearbyint_nxv4f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv8f16(, , i32) - define @vp_nearbyint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_nearbyint_nxv8f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv16f16(, , i32) - define @vp_nearbyint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_nearbyint_nxv16f16_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv32f16(, , i32) - define @vp_nearbyint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_nearbyint_nxv32f16_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv1f32(, , i32) - define @vp_nearbyint_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_nearbyint_nxv1f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv2f32(, , i32) - define @vp_nearbyint_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_nearbyint_nxv2f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv4f32(, , i32) - define @vp_nearbyint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_nearbyint_nxv4f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv8f32(, , i32) - define @vp_nearbyint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_nearbyint_nxv8f32_unmasked( ret %v } -declare 
@llvm.vp.nearbyint.nxv16f32(, , i32) - define @vp_nearbyint_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_nearbyint_nxv16f32_unmasked( %v } -declare @llvm.vp.nearbyint.nxv1f64(, , i32) - define @vp_nearbyint_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_nearbyint_nxv1f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv2f64(, , i32) - define @vp_nearbyint_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_nearbyint_nxv2f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv4f64(, , i32) - define @vp_nearbyint_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_nearbyint_nxv4f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv7f64(, , i32) - define @vp_nearbyint_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_nearbyint_nxv7f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv8f64(, , i32) - define @vp_nearbyint_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_nearbyint_nxv8f64_unmasked( @llvm.vp.nearbyint.nxv16f64(, , i32) define @vp_nearbyint_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll index c6662e092aa5a..0654fe8bd8d66 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck 
%s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) - define @foo( %x, %y, %z, %m, i32 %vl) { ; CHECK-LABEL: name: foo ; CHECK: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll index 08a2e588330b5..ff964ece0bcb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll @@ -17,4 +17,3 @@ bb: ret void } -declare void @llvm.vp.scatter.nxv2i32.nxv2p0(, , , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll index ce8db766234e8..998554a68d47d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll @@ -71,10 +71,5 @@ entry: ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 - declare void @bar(i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, ptr noundef) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 - -attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll index 06bce82efb313..8a2f6bb9b0074 100644 --- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll @@ -4,8 +4,6 @@ ; This test previously crashed with an error "ran out of registers during register allocation" -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl) { ; CHECK-LABEL: test_vsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll 
index 2d64defe8c7b1..c574bd758bd2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll @@ -57,16 +57,8 @@ middle.block: ; preds = %vector.body ret i32 %13 } -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read) -declare @llvm.masked.load.nxv4i32.p0(ptr captures(none), i32 immarg, , ) #1 - -; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) -declare i32 @llvm.vector.reduce.add.nxv4i32() #2 - ; uselistorder directives uselistorder ptr @llvm.masked.load.nxv4i32.p0, { 1, 0 } uselistorder ptr @llvm.vector.reduce.add.nxv4i32, { 1, 0 } attributes #0 = { "target-features"="+v" } -attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) } -attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll index 091caa6c65fd2..380287dd555c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.rint.nxv1bf16(, , i32) - define @vp_rint_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv1bf16: ; CHECK: # %bb.0: @@ -62,8 +60,6 @@ define @vp_rint_nxv1bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv2bf16(, , i32) - define @vp_rint_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv2bf16: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vp_rint_nxv2bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv4bf16(, , i32) - define @vp_rint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4bf16: ; CHECK: # %bb.0: @@ -162,8 +156,6 @@ define @vp_rint_nxv4bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv8bf16(, , i32) - define @vp_rint_nxv8bf16( %va, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vp_rint_nxv8bf16: ; CHECK: # %bb.0: @@ -212,8 +204,6 @@ define @vp_rint_nxv8bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv16bf16(, , i32) - define @vp_rint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16bf16: ; CHECK: # %bb.0: @@ -262,8 +252,6 @@ define @vp_rint_nxv16bf16_unmasked( ret %v } -declare @llvm.vp.rint.nxv32bf16(, , i32) - define @vp_rint_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv32bf16: ; CHECK: # %bb.0: @@ -374,7 +362,6 @@ define @vp_rint_nxv32bf16_unmasked( %v = call @llvm.vp.rint.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.rint.nxv1f16(, , i32) define @vp_rint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv1f16: @@ -454,8 +441,6 @@ define @vp_rint_nxv1f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv2f16(, , i32) - define @vp_rint_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv2f16: ; ZVFH: # %bb.0: @@ -534,8 +519,6 @@ define @vp_rint_nxv2f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv4f16(, , i32) - define @vp_rint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv4f16: ; ZVFH: # %bb.0: @@ -614,8 +597,6 @@ define @vp_rint_nxv4f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv8f16(, , i32) - define @vp_rint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv8f16: ; ZVFH: # %bb.0: @@ -696,8 +677,6 @@ define @vp_rint_nxv8f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv16f16(, , i32) - define @vp_rint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv16f16: ; ZVFH: # %bb.0: @@ -778,8 +757,6 @@ define @vp_rint_nxv16f16_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv32f16(, , i32) - define @vp_rint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv32f16: ; ZVFH: # %bb.0: @@ -923,8 +900,6 @@ define @vp_rint_nxv32f16_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv1f32(, , i32) - define @vp_rint_nxv1f32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_rint_nxv1f32: ; CHECK: # %bb.0: @@ -961,8 +936,6 @@ define @vp_rint_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv2f32(, , i32) - define @vp_rint_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv2f32: ; CHECK: # %bb.0: @@ -999,8 +972,6 @@ define @vp_rint_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv4f32(, , i32) - define @vp_rint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4f32: ; CHECK: # %bb.0: @@ -1039,8 +1010,6 @@ define @vp_rint_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv8f32(, , i32) - define @vp_rint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv8f32: ; CHECK: # %bb.0: @@ -1079,8 +1048,6 @@ define @vp_rint_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv16f32(, , i32) - define @vp_rint_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16f32: ; CHECK: # %bb.0: @@ -1119,8 +1086,6 @@ define @vp_rint_nxv16f32_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv1f64(, , i32) - define @vp_rint_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1245,8 +1210,6 @@ define @vp_rint_nxv1f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv2f64(, , i32) - define @vp_rint_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1379,8 +1342,6 @@ define @vp_rint_nxv2f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv4f64(, , i32) - define @vp_rint_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1513,8 +1474,6 @@ define @vp_rint_nxv4f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv7f64(, , i32) - define @vp_rint_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1647,8 +1606,6 @@ define @vp_rint_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv8f64(, , i32) - define @vp_rint_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv8f64: ; 
RV32ZVFH: # %bb.0: @@ -1782,7 +1739,6 @@ define @vp_rint_nxv8f64_unmasked( %va } ; Test splitting. -declare @llvm.vp.rint.nxv16f64(, , i32) define @vp_rint_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll index 4e5f6e0f65489..b8b377d70d3b7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.vscale.i64() -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @reduce_fadd(ptr %f) { ; CHECK-LABEL: reduce_fadd: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll index 8967fb8bf01ac..ffbcb65c40c33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll +++ b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt %s -S -riscv-codegenprepare -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.vscale.i64() -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @reduce_fadd(ptr %f) { ; CHECK-LABEL: define float @reduce_fadd( ; CHECK-SAME: ptr [[F:%.*]]) #[[ATTR2:[0-9]+]] { diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll index d1ea5aa76268a..37c036d38148a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.round.nxv1bf16(, , i32) - define 
@vp_round_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_round_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv2bf16(, , i32) - define @vp_round_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_round_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv4bf16(, , i32) - define @vp_round_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_round_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv8bf16(, , i32) - define @vp_round_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_round_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv16bf16(, , i32) - define @vp_round_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_round_nxv16bf16_unmasked( %v } -declare @llvm.vp.round.nxv32bf16(, , i32) - define @vp_round_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_round_nxv32bf16_unmasked( @llvm.vp.round.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.round.nxv1f16(, , i32) define @vp_round_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv1f16: @@ -490,8 +477,6 @@ define @vp_round_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv2f16(, , i32) - define @vp_round_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_round_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv4f16(, , i32) - define @vp_round_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_round_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv8f16(, , i32) - 
define @vp_round_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_round_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv16f16(, , i32) - define @vp_round_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_round_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.round.nxv32f16(, , i32) - define @vp_round_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_round_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.round.nxv1f32(, , i32) - define @vp_round_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_round_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv2f32(, , i32) - define @vp_round_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_round_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv4f32(, , i32) - define @vp_round_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_round_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv8f32(, , i32) - define @vp_round_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_round_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv16f32(, , i32) - define @vp_round_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_round_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.round.nxv1f64(, , i32) - define @vp_round_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_round_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv2f64(, , i32) - define 
@vp_round_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_round_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv4f64(, , i32) - define @vp_round_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_round_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv7f64(, , i32) - define @vp_round_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_round_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv8f64(, , i32) - define @vp_round_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_round_nxv8f64_unmasked( %v } ; Test splitting. -declare @llvm.vp.round.nxv16f64(, , i32) define @vp_round_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll index 23d0e97c1c82b..37a9ec1c0a8aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.roundeven.nxv1bf16(, , i32) - define @vp_roundeven_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_roundeven_nxv1bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv2bf16(, , i32) - define @vp_roundeven_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_roundeven_nxv2bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv4bf16(, , i32) - define @vp_roundeven_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4bf16: ; CHECK: # %bb.0: @@ 
-174,8 +168,6 @@ define @vp_roundeven_nxv4bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv8bf16(, , i32) - define @vp_roundeven_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_roundeven_nxv8bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv16bf16(, , i32) - define @vp_roundeven_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_roundeven_nxv16bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv32bf16(, , i32) - define @vp_roundeven_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_roundeven_nxv32bf16_unmasked( @llvm.vp.roundeven.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.roundeven.nxv1f16(, , i32) define @vp_roundeven_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv1f16: @@ -490,8 +477,6 @@ define @vp_roundeven_nxv1f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv2f16(, , i32) - define @vp_roundeven_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_roundeven_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv4f16(, , i32) - define @vp_roundeven_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_roundeven_nxv4f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv8f16(, , i32) - define @vp_roundeven_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_roundeven_nxv8f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv16f16(, , i32) - define @vp_roundeven_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_roundeven_nxv16f16_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv32f16(, , 
i32) - define @vp_roundeven_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_roundeven_nxv32f16_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv1f32(, , i32) - define @vp_roundeven_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_roundeven_nxv1f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv2f32(, , i32) - define @vp_roundeven_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_roundeven_nxv2f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv4f32(, , i32) - define @vp_roundeven_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_roundeven_nxv4f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv8f32(, , i32) - define @vp_roundeven_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_roundeven_nxv8f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv16f32(, , i32) - define @vp_roundeven_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_roundeven_nxv16f32_unmasked( %v } -declare @llvm.vp.roundeven.nxv1f64(, , i32) - define @vp_roundeven_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_roundeven_nxv1f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv2f64(, , i32) - define @vp_roundeven_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_roundeven_nxv2f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv4f64(, , i32) - define @vp_roundeven_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 
@@ define @vp_roundeven_nxv4f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv7f64(, , i32) - define @vp_roundeven_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_roundeven_nxv7f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv8f64(, , i32) - define @vp_roundeven_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_roundeven_nxv8f64_unmasked( @llvm.vp.roundeven.nxv16f64(, , i32) define @vp_roundeven_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll index 4d8066d12c9ad..5553b988fec97 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.roundtozero.nxv1bf16(, , i32) - define @vp_roundtozero_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_roundtozero_nxv1bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2bf16(, , i32) - define @vp_roundtozero_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_roundtozero_nxv2bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4bf16(, , i32) - define @vp_roundtozero_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_roundtozero_nxv4bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8bf16(, , i32) - define @vp_roundtozero_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_roundtozero_nxv8bf16_unmasked( %v } -declare 
@llvm.vp.roundtozero.nxv16bf16(, , i32) - define @vp_roundtozero_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_roundtozero_nxv16bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv32bf16(, , i32) - define @vp_roundtozero_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_roundtozero_nxv32bf16_unmasked( @llvm.vp.roundtozero.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.roundtozero.nxv1f16(, , i32) define @vp_roundtozero_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv1f16: @@ -490,8 +477,6 @@ define @vp_roundtozero_nxv1f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv2f16(, , i32) - define @vp_roundtozero_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_roundtozero_nxv2f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv4f16(, , i32) - define @vp_roundtozero_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_roundtozero_nxv4f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv8f16(, , i32) - define @vp_roundtozero_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_roundtozero_nxv8f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv16f16(, , i32) - define @vp_roundtozero_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_roundtozero_nxv16f16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv32f16(, , i32) - define @vp_roundtozero_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_roundtozero_nxv32f16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv1f32(, , i32) - define 
@vp_roundtozero_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_roundtozero_nxv1f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2f32(, , i32) - define @vp_roundtozero_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_roundtozero_nxv2f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4f32(, , i32) - define @vp_roundtozero_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_roundtozero_nxv4f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8f32(, , i32) - define @vp_roundtozero_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_roundtozero_nxv8f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv16f32(, , i32) - define @vp_roundtozero_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_roundtozero_nxv16f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv1f64(, , i32) - define @vp_roundtozero_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_roundtozero_nxv1f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2f64(, , i32) - define @vp_roundtozero_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_roundtozero_nxv2f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4f64(, , i32) - define @vp_roundtozero_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_roundtozero_nxv4f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv7f64(, , i32) - define @vp_roundtozero_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64: ; 
RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_roundtozero_nxv7f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8f64(, , i32) - define @vp_roundtozero_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_roundtozero_nxv8f64_unmasked( @llvm.vp.roundtozero.nxv16f64(, , i32) define @vp_roundtozero_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index 36ac7d9ec4e91..f05f6ab16c5e3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -269,5 +269,4 @@ define @foo( %a, %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32, i32 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll index 83e16ded22db1..2ffb15b7af545 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -592,8 +592,3 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i32, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll 
b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index 9e77f488bddd0..138422c5a50e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -14,7 +14,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zcmp,+prefer-vsetvli-over-read-vlenb -O2 < %s \ ; RUN: | FileCheck --check-prefix=SPILL-O2-ZCMP-VSETVLI %s - @.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1 define @foo( %a, %b, %c, i64 %gvl) nounwind @@ -262,5 +261,4 @@ define @foo( %a, %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64, i64 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll index ab9ce5173123d..d39f7cc3c7d57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -592,8 +592,3 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll index eaa8d03ed156f..3e43bb68c79b9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -198,10 +198,4 @@ entry: ret i32 0 } -declare void 
@llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) - -declare @llvm.riscv.vle.nxv16i32.i64(, ptr nocapture, i64) - attributes #0 = { noinline nounwind optnone "frame-pointer"="all" } diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll index 53ca205f6bf63..52e28dacc378d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32(, , , , i64); -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32(, , , , i64); - define @vpmerge_vadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vadd: ; CHECK: # %bb.0: @@ -14,7 +11,6 @@ define @vpmerge_vadd( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(, , , , i64, i64) define @vpmerge_vsub( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vsub: @@ -26,7 +22,6 @@ define @vpmerge_vsub( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(, , , , i64, i64) define @vpmerge_vfadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vfadd: @@ -39,7 +34,6 @@ define @vpmerge_vfadd( %passthru, %b } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(, , , , i64, i64, i64) define @vpmerge_vfsub( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vfsub: @@ -51,7 +45,6 @@ define @vpmerge_vfsub( %passthru, @llvm.riscv.vmerge.nxv2f32.nxv2f32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(, , , , i64, i64, i64) define 
@vpmerge_vwadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vwadd: @@ -63,7 +56,6 @@ define @vpmerge_vwadd( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(, , , , i64, i64) define @vpmerge_vle( %passthru, ptr %p, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vle: @@ -79,9 +71,7 @@ define @vpmerge_vle( %passthru, ptr %p, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vle.mask.nxv2i32(, ptr, , i64, i64) -declare @llvm.riscv.vslideup.mask.nxv2i32(, , i64, , i64, i64) define @vpmerge_vslideup( %passthru, %v, i64 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslideup: ; CHECK: # %bb.0: @@ -93,7 +83,6 @@ define @vpmerge_vslideup( %passthru, %b } -declare @llvm.riscv.vslidedown.mask.nxv2i32(, , i64, , i64, i64) define @vpmerge_vslidedown( %passthru, %v, i64 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslidedown: ; CHECK: # %bb.0: @@ -105,7 +94,6 @@ define @vpmerge_vslidedown( %passthru, %b } -declare @llvm.riscv.vslide1up.mask.nxv2i32(, , i32, , i64, i64) define @vpmerge_vslide1up( %passthru, %v, i32 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslide1up: ; CHECK: # %bb.0: @@ -117,7 +105,6 @@ define @vpmerge_vslide1up( %passthru, %b } -declare @llvm.riscv.vslide1down.mask.nxv2i32(, , i32, , i64, i64) define @vpmerge_vslide1down( %passthru, %v, i32 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslide1down: ; CHECK: # %bb.0: @@ -209,8 +196,6 @@ define @vmerge_larger_vl_poison_passthru( % } ; Test VFCVT_RM -declare @llvm.floor.nxv2f32() -declare @llvm.vp.merge.nxv2i32(, , , i32) define @vmerge_vfcvt_rm( %passthru, %a, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmerge_vfcvt_rm: ; CHECK: # %bb.0: # %entry @@ -227,7 +212,6 @@ entry: } ; Test VIOTA_M -declare @llvm.riscv.viota.mask.nxv2i32(, , , i64, i64) define @vpmerge_viota( %passthru, %m, %vm, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_viota: ; CHECK: # 
%bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll index 364831f530747..acd9519bb5a8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -1,13 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) - ; Test binary operator with vp.merge and vp.smax. -declare @llvm.vp.add.nxv2i32(, , , i32) define @vpmerge_vpadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd: ; CHECK: # %bb.0: @@ -20,7 +14,6 @@ define @vpmerge_vpadd( %passthru, @llvm.vp.icmp.nxv2i32(, , metadata, , i32) define @vpmerge_vpadd2( %passthru, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: @@ -48,7 +41,6 @@ define @vpmerge_vpadd3( %passthru, @llvm.vp.fadd.nxv2f32(, , , i32) define @vpmerge_vpfadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfadd: ; CHECK: # %bb.0: @@ -61,7 +53,6 @@ define @vpmerge_vpfadd( %passthru, @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(, , , i64) define @vpmerge_vrgatherei16( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vrgatherei16: ; CHECK: # %bb.0: @@ -75,7 +66,6 @@ define @vpmerge_vrgatherei16( %passthru, @llvm.vp.fptosi.nxv2i16.nxv2f32(, , i32) define @vpmerge_vpfptosi( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptosi: ; CHECK: # %bb.0: @@ -88,7 +78,6 @@ define @vpmerge_vpfptosi( %passthru, @llvm.vp.sitofp.nxv2f32.nxv2i64(, , i32) define @vpmerge_vpsitofp( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpsitofp: ; CHECK: # %bb.0: @@ -101,7 +90,6 @@ define @vpmerge_vpsitofp( %passthru, @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) define 
@vpmerge_vpzext( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpzext: ; CHECK: # %bb.0: @@ -114,7 +102,6 @@ define @vpmerge_vpzext( %passthru, @llvm.vp.trunc.nxv2i32.nxv2i64(, , i32) define @vpmerge_vptrunc( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: @@ -127,7 +114,6 @@ define @vpmerge_vptrunc( %passthru, @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) define @vpmerge_vpfpext( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfpext: ; CHECK: # %bb.0: @@ -140,7 +126,6 @@ define @vpmerge_vpfpext( %passthru, < } ; Test integer truncation by vp.trunc. -declare @llvm.vp.fptrunc.nxv2f32.nxv2f64(, , i32) define @vpmerge_vpfptrunc( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptrunc: ; CHECK: # %bb.0: @@ -153,7 +138,6 @@ define @vpmerge_vpfptrunc( %passthru, < } ; Test load operation by vp.load. -declare @llvm.vp.load.nxv2i32.p0(ptr, , i32) define @vpmerge_vpload( %passthru, ptr %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload: ; CHECK: # %bb.0: @@ -194,7 +178,6 @@ define void @vpmerge_vpload_store( %passthru, ptr %p, , i64 } @llvm.riscv.vleff.nxv2i32(, ptr, i64) define @vpmerge_vleff( %passthru, ptr %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vleff: ; CHECK: # %bb.0: @@ -209,7 +192,6 @@ define @vpmerge_vleff( %passthru, ptr %p, < } ; Test strided load by riscv.vlse -declare @llvm.riscv.vlse.nxv2i32(, ptr, i64, i64) define @vpmerge_vlse( %passthru, ptr %p, %m, i64 %s, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vlse: ; CHECK: # %bb.0: @@ -223,7 +205,6 @@ define @vpmerge_vlse( %passthru, ptr %p, < } ; Test indexed load by riscv.vluxei -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64(, ptr, , i64) define @vpmerge_vluxei( %passthru, ptr %p, %idx, %m, i64 %s, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vluxei: ; CHECK: # %bb.0: @@ -237,7 +218,6 @@ define @vpmerge_vluxei( %passthru, ptr %p, } ; Test vector index by riscv.vid -declare @llvm.riscv.vid.nxv2i32(, i64) define @vpmerge_vid( 
%passthru, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vid: ; CHECK: # %bb.0: @@ -251,7 +231,6 @@ define @vpmerge_vid( %passthru, @llvm.riscv.viota.nxv2i32(, , i64) define @vpmerge_viota( %passthru, %m, %vm, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_viota: ; CHECK: # %bb.0: @@ -280,7 +259,6 @@ define @vpmerge_viota2( %passthru, @llvm.riscv.vfclass.nxv2i32(, , i64) define @vpmerge_vflcass( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vflcass: ; CHECK: # %bb.0: @@ -294,7 +272,6 @@ define @vpmerge_vflcass( %passthru, @llvm.riscv.vfsqrt.nxv2f32(, , i64, i64) define @vpmerge_vfsqrt( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vfsqrt: ; CHECK: # %bb.0: @@ -308,7 +285,6 @@ define @vpmerge_vfsqrt( %passthru, @llvm.riscv.vfrec7.nxv2f32(, , i64, i64) define @vpmerge_vfrec7( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vfrec7: ; CHECK: # %bb.0: @@ -361,8 +337,6 @@ define @vpmerge_constrained_fadd( %pass %b = call @llvm.riscv.vmerge.nxv2f32.nxv2f32( %passthru, %passthru, %a, %m, i64 %vl) strictfp ret %b } -declare @llvm.experimental.constrained.fadd.nxv2f32(, , metadata, metadata) -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32(, , , , i64) ; This shouldn't be folded because we need to preserve exceptions with ; "fpexcept.strict" exception behaviour, and masking may hide them. @@ -451,11 +425,6 @@ define @vpmerge_trunc( %passthru, %b } -declare @llvm.vp.select.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - ; Test binary operator with vp.select and vp.smax. 
define @vpselect_vpadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd: @@ -737,7 +706,6 @@ define @vpselect_vfrec7( %passthru, @llvm.riscv.vslideup.nxv2i32(, , i64, i64, i64) define @vpselect_vslideup( %passthru, %v, i64 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslideup: ; CHECK: # %bb.0: @@ -750,7 +718,6 @@ define @vpselect_vslideup( %passthru, %b } -declare @llvm.riscv.vslidedown.nxv2i32(, , i64, i64, i64) define @vpselect_vslidedown( %passthru, %v, i64 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslidedown: ; CHECK: # %bb.0: @@ -763,7 +730,6 @@ define @vpselect_vslidedown( %passthru, %b } -declare @llvm.riscv.vslide1up.nxv2i32.i32(, , i32, i64) define @vpselect_vslide1up( %passthru, %v, i32 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslide1up: ; CHECK: # %bb.0: @@ -776,7 +742,6 @@ define @vpselect_vslide1up( %passthru, %b } -declare @llvm.riscv.vslide1down.nxv2i32.i32(, , i32, i64) define @vpselect_vslide1down( %passthru, %v, i32 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslide1down: ; CHECK: # %bb.0: @@ -932,12 +897,6 @@ entry: ; Test reductions don't have a vmerge folded into them, since the mask affects ; the result. 
-declare @llvm.riscv.vredsum.nxv2i32.nxv2i32( - , - , - , - i64) - define @vredsum( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vredsum: ; CHECK: # %bb.0: @@ -955,12 +914,6 @@ define @vredsum( %passthru, %b } -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - i64, i64) - define @vfredusum( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vfredusum: ; CHECK: # %bb.0: @@ -1031,19 +984,8 @@ define @unfoldable_vredsum_allones_mask_diff_vl( %b } -declare @llvm.riscv.vle.nxv32i16.i64(, ptr nocapture, i64) -declare @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(, , i8, , i64, i64 immarg) -declare @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(, , i64) -declare @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(, , , , i64) -declare void @llvm.riscv.vse.nxv32i16.i64(, ptr nocapture, i64) -declare @llvm.riscv.vaaddu.nxv1i16.i16.i64(, , i16, i64 immarg, i64) -declare @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(, , , , i64) - ; Tests for folding vmerge into its ops when their VLs differ -declare @llvm.riscv.vadd.nxv2i32.nxv2i32(, , , i64) -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32(, , , , i64) - ; Can fold with VL=2 define @vmerge_smaller_vl_same_passthru( %passthru, %x, %y, %m) { ; CHECK-LABEL: vmerge_smaller_vl_same_passthru: @@ -1195,7 +1137,6 @@ define @true_mask_vmerge_implicit_passthru( ret %b } - define @unfoldable_mismatched_sew( %passthru, %x, %y, %mask, i64 %avl) { ; CHECK-LABEL: unfoldable_mismatched_sew: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll index 838cd82156875..c1a2b11902315 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll @@ -50,4 +50,3 @@ entry: ret i32 %1 } -declare i32 @llvm.vscale.i32() diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll index d3f3087e06cf5..6b8822d998344 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll @@ -10,7 +10,6 @@ ; RUN: llc -mtriple riscv64 -mattr=+m,+v,+zvl256b -riscv-v-vector-bits-max=256 < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64-VLEN256EXACT - define i64 @vscale_zero() nounwind { ; RV64-LABEL: vscale_zero: ; RV64: # %bb.0: # %entry @@ -200,5 +199,3 @@ entry: ret i64 %1 } - -declare i64 @llvm.vscale.i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll index 84f31e32a9b6b..bc0e05914a821 100644 --- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.sadd.with.overflow.nxv2i32(, ) - define @saddo_nvx2i32( %x, %y) { ; CHECK-LABEL: saddo_nvx2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll index 32892bca84747..634e58198def3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,CHECK64,ZVFHMIN -declare @llvm.vp.fcmp.nxv1bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -656,8 +654,6 @@ define @fcmp_uno_vf_swap_nxv1bf16( %va, b ret %v } -declare @llvm.vp.fcmp.nxv3bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv3bf16: ; CHECK: # %bb.0: @@ -672,8 +668,6 @@ define @fcmp_oeq_vv_nxv3bf16( %va, %v } -declare @llvm.vp.fcmp.nxv8bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -1342,8 +1336,6 @@ define 
@fcmp_uno_vf_swap_nxv8bf16( %va, b ret %v } -declare @llvm.vp.fcmp.nxv64bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv64bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv64bf16: ; CHECK: # %bb.0: @@ -1554,8 +1546,6 @@ define @fcmp_oeq_vv_nxv64bf16( %va, %v } -declare @llvm.vp.fcmp.nxv1f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -2490,8 +2480,6 @@ define @fcmp_uno_vf_swap_nxv1f16( %va, half ret %v } -declare @llvm.vp.fcmp.nxv3f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv3f16: ; ZVFH: # %bb.0: @@ -2512,8 +2500,6 @@ define @fcmp_oeq_vv_nxv3f16( %va, %v } -declare @llvm.vp.fcmp.nxv8f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -3492,8 +3478,6 @@ define @fcmp_uno_vf_swap_nxv8f16( %va, half ret %v } -declare @llvm.vp.fcmp.nxv64f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv64f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv64f16: ; ZVFH: # %bb.0: @@ -3750,8 +3734,6 @@ define @fcmp_oeq_vv_nxv64f16( %va, %v } -declare @llvm.vp.fcmp.nxv1f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -4268,8 +4250,6 @@ define @fcmp_uno_vf_swap_nxv1f64( %va, do ret %v } -declare @llvm.vp.fcmp.nxv3f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv3f64: ; CHECK: # %bb.0: @@ -4281,8 +4261,6 @@ define @fcmp_oeq_vv_nxv3f64( %va, %v } -declare @llvm.vp.fcmp.nxv8f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: ; CHECK: # %bb.0: @@ -4817,8 +4795,6 @@ define @fcmp_uno_vf_swap_nxv8f64( %va, do ret %v } -declare @llvm.vp.fcmp.nxv32f64(, , 
metadata, , i32) - define @fcmp_oeq_vv_nxv32f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK32-LABEL: fcmp_oeq_vv_nxv32f64: ; CHECK32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll index 5fde258fb442b..11e0691305e63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare @llvm.vp.icmp.nxv1i1(, , metadata, , i32) - define @icmp_eq_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define @icmp_eq_vv_nxv1i1( %va, %v } -declare @llvm.vp.icmp.nxv2i1(, , metadata, , i32) - define @icmp_eq_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv2i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @icmp_eq_vv_nxv2i1( %va, %v } -declare @llvm.vp.icmp.nxv4i1(, , metadata, , i32) - define @icmp_eq_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv4i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @icmp_eq_vv_nxv4i1( %va, %v } -declare @llvm.vp.icmp.nxv8i1(, , metadata, , i32) - define @icmp_eq_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define @icmp_eq_vv_nxv8i1( %va, %v } -declare @llvm.vp.icmp.nxv16i1(, , metadata, , i32) - define @icmp_eq_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv16i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define @icmp_eq_vv_nxv16i1( %va, %v } -declare @llvm.vp.icmp.nxv32i1(, , metadata, , i32) - define @icmp_eq_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv32i1: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define @icmp_eq_vv_nxv32i1( %va, %v } -declare @llvm.vp.icmp.nxv64i1(, , metadata, , i32) - define @icmp_eq_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
icmp_eq_vv_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll index 13c63d9c80a9a..c1de57bf850ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -7,8 +7,6 @@ ; FIXME: We're missing canonicalizations of ISD::VP_SETCC equivalent to those ; for ISD::SETCC, e.g., splats aren't moved to the RHS. -declare @llvm.vp.icmp.nxv1i8(, , metadata, , i32) - define @icmp_eq_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i8: ; CHECK: # %bb.0: @@ -498,8 +496,6 @@ define @icmp_sle_vi_swap_nxv1i8( %va, %v } -declare @llvm.vp.icmp.nxv3i8(, , metadata, , i32) - define @icmp_eq_vv_nxv3i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv3i8: ; CHECK: # %bb.0: @@ -534,8 +530,6 @@ define @icmp_eq_vx_swap_nxv3i8( %va, i8 %b, < ret %v } -declare @llvm.vp.icmp.nxv8i7(, , metadata, , i32) - define @icmp_eq_vv_nxv8i7( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i7: ; CHECK: # %bb.0: @@ -581,8 +575,6 @@ define @icmp_eq_vx_swap_nxv8i7( %va, i7 %b, < ret %v } -declare @llvm.vp.icmp.nxv8i8(, , metadata, , i32) - define @icmp_eq_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i8: ; CHECK: # %bb.0: @@ -1072,8 +1064,6 @@ define @icmp_sle_vi_swap_nxv8i8( %va, %v } -declare @llvm.vp.icmp.nxv128i8(, , metadata, , i32) - define @icmp_eq_vv_nxv128i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv128i8: ; CHECK: # %bb.0: @@ -1181,8 +1171,6 @@ define @icmp_eq_vx_swap_nxv128i8( %va, i8 ret %v } -declare @llvm.vp.icmp.nxv1i32(, , metadata, , i32) - define @icmp_eq_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1672,8 +1660,6 @@ define @icmp_sle_vi_swap_nxv1i32( %va, %v } -declare @llvm.vp.icmp.nxv8i32(, , metadata, , i32) - define @icmp_eq_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
icmp_eq_vv_nxv8i32: ; CHECK: # %bb.0: @@ -2208,8 +2194,6 @@ define @icmp_sle_vi_swap_nxv8i32( %va, %v } -declare @llvm.vp.icmp.nxv32i32(, , metadata, , i32) - define @icmp_eq_vv_nxv32i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv32i32: ; CHECK: # %bb.0: @@ -2324,8 +2308,6 @@ define @icmp_eq_vx_swap_nxv32i32( %va, i32 ret %v } -declare @llvm.vp.icmp.nxv1i64(, , metadata, , i32) - define @icmp_eq_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i64: ; CHECK: # %bb.0: @@ -3067,8 +3049,6 @@ define @icmp_sle_vi_swap_nxv1i64( %va, %v } -declare @llvm.vp.icmp.nxv8i64(, , metadata, , i32) - define @icmp_eq_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll index 058f83e3f1f31..89c23688a6350 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -405,8 +405,6 @@ define @icmp_ult_vi_nxv8i8_4( %va) { ret %vc } -declare @llvm.riscv.vmv.v.x.nxv8i8.iXLen(, i8, iXLen); - ; Test that we don't optimize ult x, 0 -> ule x, -1 define @icmp_ult_vi_nxv8i8_5( %va, iXLen %vl) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_5: diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll index 472915939ffc4..75fb468e77b99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret 
%a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -53,12 +40,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -129,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -155,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,13 +135,6 @@ entry: ret %a } -declare 
@llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -206,12 +154,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -231,13 +173,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll index b98a7aba7be16..0658647451858 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -53,12 +40,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -129,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -155,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry 
@@ -206,12 +154,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -231,13 +173,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll index 8ec7126422913..785d4e6b6026f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+xsfvfwmaccqqq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32mf2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32mf2: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m1: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4bf16.nxv4bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m2: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare 
@llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m4: ; CHECK: # %bb.0: # %entry @@ -156,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv16bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll index 25256f7914931..56af93877dbff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m8: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll index eebc51619480b..9c36b2b8a71d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll index 8d61901107931..3c499d4111356 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare 
@llvm.riscv.sf.vqmaccsu.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll index 0d7052356e558..8264b876d245d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqmaccsu_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll index 6667a89052e9c..875e9b4426fa6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vqmaccu_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll index 3332390f71e01..a6bc5900c6caf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll index 82a2a2e0fc835..d0ffec843cd01 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll index 74fb66f5bf351..69f04a5cc9833 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vqmaccus_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll index d9a49a1b6b6ea..1bd966ef7e481 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll @@ -14,5 +14,3 @@ entry: ret void } -; Function Attrs: nocallback nofree nosync nounwind willreturn -declare void @llvm.riscv.sf.vtzero.t.i64(i64 immarg, i64, i64, i64 immarg, i64 immarg) #0 diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll index 9b9a849cd7262..c57fce7e919ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll index b63974f04a66e..0131ff6edbb18 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll index 62d629b1b1f1d..e35fd46958b9d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll index 7a90c97bcf0be..468a1d59e29a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen 
%tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll index 29451c60b9248..dcf35c50bc83a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll @@ -6,8 +6,6 @@ ; RUN: -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w2_f16m8: ; CHECK: # %bb.0: # %entry @@ -21,8 +19,6 @@ define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, %v1, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w1_f32m8: ; CHECK: # %bb.0: # %entry @@ -36,8 +32,6 @@ define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, %v1, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w1_f64m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w1_f64m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll index 6a4b29ff0e786..0e698b2ca3d3a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_s_s_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_s_s_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll index 79239b01cd1d4..71093f381dd63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_s_u_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_s_u_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll index b0d039bb194a4..1ca83010fa14c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_u_s_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_u_s_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll index 913c277655e43..9bfc246f5325f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_u_u_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: 
test_sf_mm_u_u_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll index 8048dec110a5f..8df930e4ce3dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte16.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte16(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll index a526dc8471b1a..3e1ac2688d569 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte32.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte32(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll index ed0c48ac467e6..315839794af8c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte64.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte64(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll index 67b3ed2ec55ab..7451ef286ffc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte8.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte8(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll index 4da37fad1b536..b8b2c0c3a709b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettk(iXLen %tk) { ; CHECK-LABEL: test_sf_vsettk: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll index 143c26cc8cff1..5d5970726d643 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettm(iXLen %tm) { ; CHECK-LABEL: test_sf_vsettm: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll index 48fa1bc8f6cbe..07584899fbcac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettnt_e8w1(iXLen %tn) { ; CHECK-LABEL: test_sf_vsettnt_e8w1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll index 7a76151e01cc5..9e76b8b3172f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste16.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste16(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll index 8ff6e6af3b02d..086bb347fd45a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste32.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste32(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll index 53990e4dd2483..cff4f78fa8817 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i 
-mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste64.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste64(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll index 09b72594ac7c6..3e080bec0878b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste8.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste8(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll index 394eb60f73743..b98b7216bdb8d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtdiscard() - define dso_local void @test_sf_vtdiscard() { ; CHECK-LABEL: test_sf_vtdiscard: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll index 66c9d26c209f0..f551e48a5ef10 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen, , iXLen) - 
define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_bf16m8: ; CHECK: # %bb.0: # %entry @@ -22,8 +20,6 @@ define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, %src, iX ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f16(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f16: ; CHECK: # %bb.0: # %entry @@ -35,8 +31,6 @@ define void @test_sf_vtmv_t_v_f16(iXLen %tss, %src, iXLen % ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f32(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f32: ; CHECK: # %bb.0: # %entry @@ -48,8 +42,6 @@ define void @test_sf_vtmv_t_v_f32(iXLen %tss, %src, iXLen ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f64(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f64: ; CHECK: # %bb.0: # %entry @@ -61,8 +53,6 @@ define void @test_sf_vtmv_t_v_f64(iXLen %tss, %src, iXLen ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i8(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i8: ; CHECK: # %bb.0: # %entry @@ -74,8 +64,6 @@ define void @test_sf_vtmv_t_v_i8(iXLen %tss, %src, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i16(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i16: ; CHECK: # %bb.0: # %entry @@ -87,8 +75,6 @@ define void @test_sf_vtmv_t_v_i16(iXLen %tss, %src, iXLen %v ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i32(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i32: ; CHECK: # %bb.0: # %entry @@ -100,8 +86,6 @@ define void @test_sf_vtmv_t_v_i32(iXLen %tss, %src, iXLen %v ret void } -declare void 
@llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i64(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll index 0dcc2ab5b9a0d..33445b59cca1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_bf16m8: ; CHECK: # %bb.0: # %entry @@ -22,8 +20,6 @@ define @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f16: ; CHECK: # %bb.0: # %entry @@ -35,8 +31,6 @@ define @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f32: ; CHECK: # %bb.0: # %entry @@ -48,8 +42,6 @@ define @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f64: ; CHECK: # %bb.0: # %entry @@ -61,8 +53,6 @@ define @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i8: ; CHECK: # %bb.0: # %entry @@ -74,8 +64,6 @@ define @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen, 
iXLen) - define @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i16: ; CHECK: # %bb.0: # %entry @@ -87,8 +75,6 @@ define @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i32: ; CHECK: # %bb.0: # %entry @@ -100,8 +86,6 @@ define @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i64(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll index bbccb026f161b..3ae5ec09be4dd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll @@ -9,7 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtzero.t.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) define void @test_sf_vtzero_t(iXLen %tm, iXLen %tn) { ; CHECK-LABEL: test_sf_vtzero_t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll index 350c888a2c7d6..8608179fb09e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll @@ -4,8 +4,6 @@ ; Make sure we don't unnecessrily sink i1 vector splats. 
-declare <8 x i1> @llvm.vp.and.v4i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define void @sink_splat_vp_and_i1(ptr nocapture %a, i1 zeroext %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_and_i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 19a184148c0b6..519312766feeb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -2270,11 +2270,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare i64 @llvm.vscale.i64() -declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) -declare @llvm.fma.nxv2f32(, , ) -declare float @llvm.fma.f32(float, float, float) - define void @sink_splat_icmp(ptr nocapture %x, i32 signext %y) { ; CHECK-LABEL: sink_splat_icmp: ; CHECK: # %bb.0: # %entry @@ -2309,7 +2304,6 @@ vector.body: ; preds = %vector.body, %entry for.cond.cleanup: ; preds = %vector.body ret void } -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>) define void @sink_splat_fcmp(ptr nocapture %x, float %y) { ; CHECK-LABEL: sink_splat_fcmp: @@ -2345,7 +2339,6 @@ vector.body: ; preds = %vector.body, %entry for.cond.cleanup: ; preds = %vector.body ret void } -declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>) define void @sink_splat_udiv(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_udiv: @@ -2847,8 +2840,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_min(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_min: ; CHECK: # %bb.0: # %entry @@ -2917,8 +2908,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_max(ptr 
nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_max: ; CHECK: # %bb.0: # %entry @@ -2987,8 +2976,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_umin(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_umin: ; CHECK: # %bb.0: # %entry @@ -3057,8 +3044,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_umax(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_umax: ; CHECK: # %bb.0: # %entry @@ -3127,8 +3112,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_sadd_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_sadd_sat: ; CHECK: # %bb.0: # %entry @@ -3197,8 +3180,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_ssub_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_ssub_sat: ; CHECK: # %bb.0: # %entry @@ -3233,8 +3214,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_uadd_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_uadd_sat: ; CHECK: # %bb.0: # %entry @@ -3303,8 +3282,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_usub_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_usub_sat: ; CHECK: # %bb.0: # %entry @@ -3339,8 +3316,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_mul: ; CHECK: # %bb.0: # %entry @@ 
-3377,8 +3352,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_add: ; CHECK: # %bb.0: # %entry @@ -3451,8 +3424,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sub: ; CHECK: # %bb.0: # %entry @@ -3525,8 +3496,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_shl: ; CHECK: # %bb.0: # %entry @@ -3563,8 +3532,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_lshr: ; CHECK: # %bb.0: # %entry @@ -3601,8 +3568,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_ashr: ; CHECK: # %bb.0: # %entry @@ -3639,8 +3604,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fmul.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fmul: ; CHECK: # %bb.0: # %entry @@ -3677,8 +3640,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fdiv.v4i32(<4 x float>, <4 x 
float>, <4 x i1>, i32) - define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fdiv: ; CHECK: # %bb.0: # %entry @@ -3751,8 +3712,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fadd: ; CHECK: # %bb.0: # %entry @@ -3789,8 +3748,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fsub.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fsub: ; CHECK: # %bb.0: # %entry @@ -3827,8 +3784,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.frsub.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_frsub: ; CHECK: # %bb.0: # %entry @@ -3865,8 +3820,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_udiv: ; CHECK: # %bb.0: # %entry @@ -3903,8 +3856,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sdiv: ; CHECK: # %bb.0: # %entry @@ -3941,8 +3892,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; 
CHECK-LABEL: sink_splat_vp_urem: ; CHECK: # %bb.0: # %entry @@ -3979,8 +3928,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_srem: ; CHECK: # %bb.0: # %entry @@ -4056,8 +4003,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly %b, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fma: ; CHECK: # %bb.0: # %entry @@ -4138,7 +4083,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } - define void @sink_splat_mul_lmul2(ptr nocapture %a, i64 signext %x) { ; CHECK-LABEL: sink_splat_mul_lmul2: ; CHECK: # %bb.0: # %entry @@ -4860,8 +4804,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i32) - define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_icmp: ; CHECK: # %bb.0: # %entry @@ -4901,8 +4843,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1>, i32) - define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fcmp: ; CHECK: # %bb.0: # %entry @@ -4942,8 +4882,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_min(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_min: ; CHECK: # %bb.0: # %entry @@ -5016,8 +4954,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> 
@llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_max(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_max: ; CHECK: # %bb.0: # %entry @@ -5126,8 +5062,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_umax(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_umax: ; CHECK: # %bb.0: # %entry @@ -5200,8 +5134,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sadd_sat: ; CHECK: # %bb.0: # %entry @@ -5274,8 +5206,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_ssub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_ssub_sat: ; CHECK: # %bb.0: # %entry @@ -5312,8 +5242,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_uadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_uadd_sat: ; CHECK: # %bb.0: # %entry @@ -5386,8 +5314,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_usub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_usub_sat: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll index e0e8a80037733..b5861fe7afaa8 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.smul.with.overflow.nxv1i8(, ) - define @smulo_nxv1i8( %x, %y) { ; CHECK-LABEL: smulo_nxv1i8: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define @smulo_nxv1i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv2i8(, ) - define @smulo_nxv2i8( %x, %y) { ; CHECK-LABEL: smulo_nxv2i8: ; CHECK: # %bb.0: @@ -39,8 +35,6 @@ define @smulo_nxv2i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv4i8(, ) - define @smulo_nxv4i8( %x, %y) { ; CHECK-LABEL: smulo_nxv4i8: ; CHECK: # %bb.0: @@ -58,8 +52,6 @@ define @smulo_nxv4i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv8i8(, ) - define @smulo_nxv8i8( %x, %y) { ; CHECK-LABEL: smulo_nxv8i8: ; CHECK: # %bb.0: @@ -77,8 +69,6 @@ define @smulo_nxv8i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv16i8(, ) - define @smulo_nxv16i8( %x, %y) { ; CHECK-LABEL: smulo_nxv16i8: ; CHECK: # %bb.0: @@ -96,8 +86,6 @@ define @smulo_nxv16i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv32i8(, ) - define @smulo_nxv32i8( %x, %y) { ; CHECK-LABEL: smulo_nxv32i8: ; CHECK: # %bb.0: @@ -115,8 +103,6 @@ define @smulo_nxv32i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv64i8(, ) - define @smulo_nxv64i8( %x, %y) { ; CHECK-LABEL: smulo_nxv64i8: ; CHECK: # %bb.0: @@ -134,8 +120,6 @@ define @smulo_nxv64i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i16(, ) - define @smulo_nxv1i16( %x, %y) { ; CHECK-LABEL: smulo_nxv1i16: ; CHECK: # %bb.0: @@ -153,8 +137,6 @@ define @smulo_nxv1i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i16(, ) - define @smulo_nxv2i16( %x, %y) { ; CHECK-LABEL: smulo_nxv2i16: ; CHECK: # %bb.0: @@ -172,8 +154,6 @@ define @smulo_nxv2i16( %x, %d } -declare { , } 
@llvm.smul.with.overflow.nxv4i16(, ) - define @smulo_nxv4i16( %x, %y) { ; CHECK-LABEL: smulo_nxv4i16: ; CHECK: # %bb.0: @@ -191,8 +171,6 @@ define @smulo_nxv4i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i16(, ) - define @smulo_nxv8i16( %x, %y) { ; CHECK-LABEL: smulo_nxv8i16: ; CHECK: # %bb.0: @@ -210,8 +188,6 @@ define @smulo_nxv8i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv16i16(, ) - define @smulo_nxv16i16( %x, %y) { ; CHECK-LABEL: smulo_nxv16i16: ; CHECK: # %bb.0: @@ -229,8 +205,6 @@ define @smulo_nxv16i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv32i16(, ) - define @smulo_nxv32i16( %x, %y) { ; CHECK-LABEL: smulo_nxv32i16: ; CHECK: # %bb.0: @@ -248,8 +222,6 @@ define @smulo_nxv32i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i32(, ) - define @smulo_nxv1i32( %x, %y) { ; CHECK-LABEL: smulo_nxv1i32: ; CHECK: # %bb.0: @@ -267,8 +239,6 @@ define @smulo_nxv1i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i32(, ) - define @smulo_nxv2i32( %x, %y) { ; CHECK-LABEL: smulo_nxv2i32: ; CHECK: # %bb.0: @@ -286,8 +256,6 @@ define @smulo_nxv2i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv4i32(, ) - define @smulo_nxv4i32( %x, %y) { ; CHECK-LABEL: smulo_nxv4i32: ; CHECK: # %bb.0: @@ -305,8 +273,6 @@ define @smulo_nxv4i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i32(, ) - define @smulo_nxv8i32( %x, %y) { ; CHECK-LABEL: smulo_nxv8i32: ; CHECK: # %bb.0: @@ -324,8 +290,6 @@ define @smulo_nxv8i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv16i32(, ) - define @smulo_nxv16i32( %x, %y) { ; CHECK-LABEL: smulo_nxv16i32: ; CHECK: # %bb.0: @@ -343,8 +307,6 @@ define @smulo_nxv16i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i64(, ) - define @smulo_nxv1i64( %x, %y) { ; CHECK-LABEL: smulo_nxv1i64: ; CHECK: # %bb.0: @@ -363,8 +325,6 @@ define @smulo_nxv1i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i64(, ) - define @smulo_nxv2i64( %x, %y) { ; CHECK-LABEL: smulo_nxv2i64: ; CHECK: # 
%bb.0: @@ -383,8 +343,6 @@ define @smulo_nxv2i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv4i64(, ) - define @smulo_nxv4i64( %x, %y) { ; CHECK-LABEL: smulo_nxv4i64: ; CHECK: # %bb.0: @@ -403,8 +361,6 @@ define @smulo_nxv4i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i64(, ) - define @smulo_nxv8i64( %x, %y) { ; CHECK-LABEL: smulo_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll index 26325328e5671..f4e77b1a4b4f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare @llvm.bitreverse.nxv2i64() - define i32 @splat_vector_split_i64() { ; CHECK-LABEL: splat_vector_split_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll index fc67eec0f48a0..97ea888e6dda1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll @@ -226,5 +226,3 @@ define void @extract_vector_mixed3(ptr %p, ptr %p2, i32 %v) { ret void } - -declare <4 x i32> @llvm.vector.extract.v4i32.nxv1132( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll index 03b090def5119..36b57865fea9b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll @@ -1,11 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32>, <4 x 
i32>) -declare <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16>, <8 x i16>) -declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>) - define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; CHECK-LABEL: vec_v2i64: ; CHECK: # %bb.0: @@ -79,11 +74,6 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ret <16 x i8> %tmp } -declare @llvm.sshl.sat.nxv2i64(, ) -declare @llvm.sshl.sat.nxv4i32(, ) -declare @llvm.sshl.sat.nxv8i16(, ) -declare @llvm.sshl.sat.nxv16i8(, ) - define @vec_nxv2i64( %x, %y) nounwind { ; CHECK-LABEL: vec_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll index 95c1292e41927..a98c40b532c4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64 -declare @llvm.stepvector.nxv1i8() - define @stepvector_nxv1i8() { ; CHECK-LABEL: stepvector_nxv1i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @stepvector_nxv1i8() { ret %v } -declare @llvm.stepvector.nxv2i8() - define @stepvector_nxv2i8() { ; CHECK-LABEL: stepvector_nxv2i8: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define @stepvector_nxv2i8() { ret %v } -declare @llvm.stepvector.nxv3i8() - define @stepvector_nxv3i8() { ; CHECK-LABEL: stepvector_nxv3i8: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define @stepvector_nxv3i8() { ret %v } -declare @llvm.stepvector.nxv4i8() - define @stepvector_nxv4i8() { ; CHECK-LABEL: stepvector_nxv4i8: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define @stepvector_nxv4i8() { ret %v } -declare @llvm.stepvector.nxv8i8() - define @stepvector_nxv8i8() { ; CHECK-LABEL: stepvector_nxv8i8: ; CHECK: # %bb.0: @@ -103,8 +93,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv16i8() - define @stepvector_nxv16i8() { ; CHECK-LABEL: 
stepvector_nxv16i8: ; CHECK: # %bb.0: @@ -115,8 +103,6 @@ define @stepvector_nxv16i8() { ret %v } -declare @llvm.stepvector.nxv32i8() - define @stepvector_nxv32i8() { ; CHECK-LABEL: stepvector_nxv32i8: ; CHECK: # %bb.0: @@ -127,8 +113,6 @@ define @stepvector_nxv32i8() { ret %v } -declare @llvm.stepvector.nxv64i8() - define @stepvector_nxv64i8() { ; CHECK-LABEL: stepvector_nxv64i8: ; CHECK: # %bb.0: @@ -139,8 +123,6 @@ define @stepvector_nxv64i8() { ret %v } -declare @llvm.stepvector.nxv1i16() - define @stepvector_nxv1i16() { ; CHECK-LABEL: stepvector_nxv1i16: ; CHECK: # %bb.0: @@ -151,8 +133,6 @@ define @stepvector_nxv1i16() { ret %v } -declare @llvm.stepvector.nxv2i16() - define @stepvector_nxv2i16() { ; CHECK-LABEL: stepvector_nxv2i16: ; CHECK: # %bb.0: @@ -163,8 +143,6 @@ define @stepvector_nxv2i16() { ret %v } -declare @llvm.stepvector.nxv2i15() - define @stepvector_nxv2i15() { ; CHECK-LABEL: stepvector_nxv2i15: ; CHECK: # %bb.0: @@ -175,8 +153,6 @@ define @stepvector_nxv2i15() { ret %v } -declare @llvm.stepvector.nxv3i16() - define @stepvector_nxv3i16() { ; CHECK-LABEL: stepvector_nxv3i16: ; CHECK: # %bb.0: @@ -187,8 +163,6 @@ define @stepvector_nxv3i16() { ret %v } -declare @llvm.stepvector.nxv4i16() - define @stepvector_nxv4i16() { ; CHECK-LABEL: stepvector_nxv4i16: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define @stepvector_nxv4i16() { ret %v } -declare @llvm.stepvector.nxv8i16() - define @stepvector_nxv8i16() { ; CHECK-LABEL: stepvector_nxv8i16: ; CHECK: # %bb.0: @@ -211,8 +183,6 @@ define @stepvector_nxv8i16() { ret %v } -declare @llvm.stepvector.nxv16i16() - define @stepvector_nxv16i16() { ; CHECK-LABEL: stepvector_nxv16i16: ; CHECK: # %bb.0: @@ -264,8 +234,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv32i16() - define @stepvector_nxv32i16() { ; CHECK-LABEL: stepvector_nxv32i16: ; CHECK: # %bb.0: @@ -276,8 +244,6 @@ define @stepvector_nxv32i16() { ret %v } -declare @llvm.stepvector.nxv1i32() - define @stepvector_nxv1i32() { ; CHECK-LABEL: 
stepvector_nxv1i32: ; CHECK: # %bb.0: @@ -288,8 +254,6 @@ define @stepvector_nxv1i32() { ret %v } -declare @llvm.stepvector.nxv2i32() - define @stepvector_nxv2i32() { ; CHECK-LABEL: stepvector_nxv2i32: ; CHECK: # %bb.0: @@ -300,8 +264,6 @@ define @stepvector_nxv2i32() { ret %v } -declare @llvm.stepvector.nxv3i32() - define @stepvector_nxv3i32() { ; CHECK-LABEL: stepvector_nxv3i32: ; CHECK: # %bb.0: @@ -312,8 +274,6 @@ define @stepvector_nxv3i32() { ret %v } -declare @llvm.stepvector.nxv4i32() - define @stepvector_nxv4i32() { ; CHECK-LABEL: stepvector_nxv4i32: ; CHECK: # %bb.0: @@ -324,8 +284,6 @@ define @stepvector_nxv4i32() { ret %v } -declare @llvm.stepvector.nxv8i32() - define @stepvector_nxv8i32() { ; CHECK-LABEL: stepvector_nxv8i32: ; CHECK: # %bb.0: @@ -336,8 +294,6 @@ define @stepvector_nxv8i32() { ret %v } -declare @llvm.stepvector.nxv16i32() - define @stepvector_nxv16i32() { ; CHECK-LABEL: stepvector_nxv16i32: ; CHECK: # %bb.0: @@ -389,8 +345,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv1i64() - define @stepvector_nxv1i64() { ; CHECK-LABEL: stepvector_nxv1i64: ; CHECK: # %bb.0: @@ -401,8 +355,6 @@ define @stepvector_nxv1i64() { ret %v } -declare @llvm.stepvector.nxv2i64() - define @stepvector_nxv2i64() { ; CHECK-LABEL: stepvector_nxv2i64: ; CHECK: # %bb.0: @@ -413,8 +365,6 @@ define @stepvector_nxv2i64() { ret %v } -declare @llvm.stepvector.nxv3i64() - define @stepvector_nxv3i64() { ; CHECK-LABEL: stepvector_nxv3i64: ; CHECK: # %bb.0: @@ -425,8 +375,6 @@ define @stepvector_nxv3i64() { ret %v } -declare @llvm.stepvector.nxv4i64() - define @stepvector_nxv4i64() { ; CHECK-LABEL: stepvector_nxv4i64: ; CHECK: # %bb.0: @@ -437,8 +385,6 @@ define @stepvector_nxv4i64() { ret %v } -declare @llvm.stepvector.nxv8i64() - define @stepvector_nxv8i64() { ; CHECK-LABEL: stepvector_nxv8i64: ; CHECK: # %bb.0: @@ -525,8 +471,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv16i64() - define @stepvector_nxv16i64() { ; RV32-LABEL: stepvector_nxv16i64: ; RV32: # %bb.0: 
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll index d801c5187b592..6c51848d9080d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll @@ -3,8 +3,6 @@ %struct.foo = type { i32, i32, i32, i32 } -declare @llvm.stepvector.nxv1i64() - define @gather(ptr %a, i32 %len) { ; CHECK-LABEL: @gather( ; CHECK-NEXT: vector.ph: @@ -662,11 +660,6 @@ define @vector_base_vector_offset(ptr %p, ret %x } -declare i64 @llvm.vscale.i64() -declare void @llvm.masked.scatter.nxv1i64.nxv1p0(, , i32, ) -declare @llvm.masked.gather.nxv1i64.nxv1p0(, i32, , ) - - define @vp_gather(ptr %a, i32 %len) { ; CHECK-LABEL: @vp_gather( ; CHECK-NEXT: vector.ph: diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll index 081afcfab8dae..f087efcc5f57b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll @@ -5,8 +5,6 @@ ; these instructions. MachineMemOperand handling can't currently deal with a ; negative stride that would allow memory before the pointer to be read. 
-declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, , i32) - define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: name: strided_vpload_nxv1i8_i8 ; CHECK: bb.0 (%ir-block.0): @@ -24,8 +22,6 @@ define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, ret %load } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8(, ptr, i8, , i32) - define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: name: strided_vpstore_nxv1i8_i8 ; CHECK: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll index b6aa4affbb10f..6381887a1a2f9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,CHECK-NO-OPT,CHECK-NO-OPT-ZVFHMIN,CHECK-NO-OPT-RV64 -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, , i32) - define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8_i8: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr, i16, , i32) - define @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8_i16: ; CHECK: # %bb.0: @@ -48,8 +44,6 @@ define @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr, i64, , i32) - define @strided_vpload_nxv1i8_i64(ptr %ptr, i64 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64: ; CHECK-RV32: # %bb.0: @@ -82,8 +76,6 @@ define @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 s ret %load } -declare 
@llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signe ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i8: ; CHECK: # %bb.0: @@ -116,8 +106,6 @@ define @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, < ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i8: ; CHECK: # %bb.0: @@ -128,8 +116,6 @@ define @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, < ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i8: ; CHECK: # %bb.0: @@ -160,8 +146,6 @@ define @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signe ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i16: ; CHECK: # %bb.0: @@ -172,8 +156,6 @@ define @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i16: ; CHECK: # %bb.0: @@ -194,8 +176,6 @@ define @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i16(ptr %ptr, i32 
signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i16: ; CHECK: # %bb.0: @@ -216,8 +196,6 @@ define @strided_vpload_nxv4i16_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i16: ; CHECK: # %bb.0: @@ -228,8 +206,6 @@ define @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i32: ; CHECK: # %bb.0: @@ -240,8 +216,6 @@ define @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i32: ; CHECK: # %bb.0: @@ -262,8 +236,6 @@ define @strided_vpload_nxv2i32_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i32: ; CHECK: # %bb.0: @@ -284,8 +256,6 @@ define @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i32: ; CHECK: # %bb.0: @@ -296,8 +266,6 @@ define @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i64: ; CHECK: # %bb.0: @@ -328,8 +296,6 @@ define 
@strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i64: ; CHECK: # %bb.0: @@ -340,8 +306,6 @@ define @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i64: ; CHECK: # %bb.0: @@ -352,8 +316,6 @@ define @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i64: ; CHECK: # %bb.0: @@ -364,8 +326,6 @@ define @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1bf16: ; CHECK: # %bb.0: @@ -376,8 +336,6 @@ define @strided_vpload_nxv1bf16(ptr %ptr, i32 signext %str ret %load } -declare @llvm.experimental.vp.strided.load.nxv2bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2bf16: ; CHECK: # %bb.0: @@ -398,8 +356,6 @@ define @strided_vpload_nxv2bf16_allones_mask(ptr %ptr, i32 ret %load } -declare @llvm.experimental.vp.strided.load.nxv4bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4bf16: ; CHECK: # %bb.0: @@ -420,8 +376,6 @@ define @strided_vpload_nxv4bf16_unit_stride(ptr %ptr, %load } -declare 
@llvm.experimental.vp.strided.load.nxv8bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8bf16: ; CHECK: # %bb.0: @@ -432,8 +386,6 @@ define @strided_vpload_nxv8bf16(ptr %ptr, i32 signext %str ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f16: ; CHECK: # %bb.0: @@ -444,8 +396,6 @@ define @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride ret %load } -declare @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f16: ; CHECK: # %bb.0: @@ -466,8 +416,6 @@ define @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 si ret %load } -declare @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f16: ; CHECK: # %bb.0: @@ -488,8 +436,6 @@ define @strided_vpload_nxv4f16_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f16: ; CHECK: # %bb.0: @@ -500,8 +446,6 @@ define @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f32: ; CHECK: # %bb.0: @@ -512,8 +456,6 @@ define @strided_vpload_nxv1f32(ptr %ptr, i32 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f32(ptr %ptr, i32 signext 
%stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f32: ; CHECK: # %bb.0: @@ -534,8 +476,6 @@ define @strided_vpload_nxv2f32_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f32: ; CHECK: # %bb.0: @@ -546,8 +486,6 @@ define @strided_vpload_nxv4f32(ptr %ptr, i32 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f32: ; CHECK: # %bb.0: @@ -568,8 +506,6 @@ define @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 s ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f64: ; CHECK: # %bb.0: @@ -590,8 +526,6 @@ define @strided_vpload_nxv1f64_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f64: ; CHECK: # %bb.0: @@ -602,8 +536,6 @@ define @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stri ret %load } -declare @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f64: ; CHECK: # %bb.0: @@ -624,8 +556,6 @@ define @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32 ret %load } -declare @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f64: ; CHECK: # %bb.0: @@ -657,8 +587,6 @@ define 
@strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32 ret %v } -declare @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr, i32, , i32) - ; Splitting define @strided_load_nxv16f64(ptr %ptr, i64 %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_nxv16f64: @@ -758,8 +686,6 @@ define @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 ret %v } -declare @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr, i64, , i32) - ; Widening + splitting (with HiIsEmpty == true) ; NOTE: We can't return as that introduces a vector ; store that can't yet be legalized through widening. In order to test purely @@ -867,10 +793,6 @@ define @strided_load_nxv17f64(ptr %ptr, i64 %stride, %lo } -declare @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr, i64, , i32) -declare @llvm.experimental.vector.extract.nxv1f64( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv16f64( %vec, i64 %idx) - ; Test unmasked integer zero strided define @zero_strided_unmasked_vpload_nxv1i8_i8(ptr %ptr) { ; CHECK-OPT-LABEL: zero_strided_unmasked_vpload_nxv1i8_i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll index 2791b262cafd1..2ec89888af077 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s \ ; RUN: -check-prefixes=CHECK,CHECK-RV64 -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8(, ptr, i8, , i32) - define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8_i8: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i16(, ptr, i16, , i32) - define void @strided_vpstore_nxv1i8_i16( %val, ptr %ptr, i16 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8_i16: 
; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define void @strided_vpstore_nxv1i8_i16( %val, ptr %ptr, i16 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i64(, ptr, i64, , i32) - define void @strided_vpstore_nxv1i8_i64( %val, ptr %ptr, i64 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i64: ; CHECK-RV32: # %bb.0: @@ -54,8 +48,6 @@ define void @strided_vpstore_nxv1i8_i64( %val, ptr %ptr, i64 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define void @strided_vpstore_nxv1i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define void @strided_vpstore_nxv2i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i8: ; CHECK: # %bb.0: @@ -90,8 +78,6 @@ define void @strided_vpstore_nxv4i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i8: ; CHECK: # %bb.0: @@ -112,8 +98,6 @@ define void @strided_vpstore_nxv8i8_unit_stride( %val, ptr %ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
strided_vpstore_nxv1i16: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define void @strided_vpstore_nxv1i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i16: ; CHECK: # %bb.0: @@ -136,8 +118,6 @@ define void @strided_vpstore_nxv2i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i16: ; CHECK: # %bb.0: @@ -158,8 +138,6 @@ define void @strided_vpstore_nxv4i16_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i16: ; CHECK: # %bb.0: @@ -170,8 +148,6 @@ define void @strided_vpstore_nxv8i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i32: ; CHECK: # %bb.0: @@ -182,8 +158,6 @@ define void @strided_vpstore_nxv1i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i32: ; CHECK: # %bb.0: @@ -194,8 +168,6 @@ define void @strided_vpstore_nxv2i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i32( %val, ptr %ptr, i32 signext %strided, %m, i32 
zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i32: ; CHECK: # %bb.0: @@ -216,8 +188,6 @@ define void @strided_vpstore_nxv4i32_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i32: ; CHECK: # %bb.0: @@ -228,8 +198,6 @@ define void @strided_vpstore_nxv8i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i64: ; CHECK: # %bb.0: @@ -250,8 +218,6 @@ define void @strided_vpstore_nxv1i64_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i64: ; CHECK: # %bb.0: @@ -262,8 +228,6 @@ define void @strided_vpstore_nxv2i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i64: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define void @strided_vpstore_nxv4i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i64: ; CHECK: # %bb.0: @@ -286,8 +248,6 @@ define void @strided_vpstore_nxv8i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1bf16( %val, ptr %ptr, 
i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1bf16: ; CHECK: # %bb.0: @@ -298,8 +258,6 @@ define void @strided_vpstore_nxv1bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv2bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2bf16: ; CHECK: # %bb.0: @@ -310,8 +268,6 @@ define void @strided_vpstore_nxv2bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv4bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4bf16: ; CHECK: # %bb.0: @@ -332,8 +288,6 @@ define void @strided_vpstore_nxv4bf16_unit_stride( %val, pt ret void } -declare void @llvm.experimental.vp.strided.store.nxv8bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8bf16: ; CHECK: # %bb.0: @@ -344,8 +298,6 @@ define void @strided_vpstore_nxv8bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f16: ; CHECK: # %bb.0: @@ -356,8 +308,6 @@ define void @strided_vpstore_nxv1f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f16: ; CHECK: # %bb.0: @@ -368,8 +318,6 @@ define void @strided_vpstore_nxv2f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f16.p0.i32(, ptr, i32, , i32) - define void 
@strided_vpstore_nxv4f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f16: ; CHECK: # %bb.0: @@ -390,8 +338,6 @@ define void @strided_vpstore_nxv4f16_unit_stride( %val, ptr % ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f16: ; CHECK: # %bb.0: @@ -402,8 +348,6 @@ define void @strided_vpstore_nxv8f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f32: ; CHECK: # %bb.0: @@ -414,8 +358,6 @@ define void @strided_vpstore_nxv1f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f32: ; CHECK: # %bb.0: @@ -426,8 +368,6 @@ define void @strided_vpstore_nxv2f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f32: ; CHECK: # %bb.0: @@ -448,8 +388,6 @@ define void @strided_vpstore_nxv4f32_unit_stride( %val, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f32: ; CHECK: # %bb.0: @@ -460,8 +398,6 @@ define void @strided_vpstore_nxv8f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f64.p0.i32(, ptr, i32, , 
i32) - define void @strided_vpstore_nxv1f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f64: ; CHECK: # %bb.0: @@ -482,8 +418,6 @@ define void @strided_vpstore_nxv1f64_unit_stride( %val, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f64: ; CHECK: # %bb.0: @@ -494,8 +428,6 @@ define void @strided_vpstore_nxv2f64( %val, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f64: ; CHECK: # %bb.0: @@ -506,8 +438,6 @@ define void @strided_vpstore_nxv4f64( %val, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f64: ; CHECK: # %bb.0: @@ -549,8 +479,6 @@ define void @strided_vpstore_nxv3f32_allones_mask( %v, ptr % ret void } -declare void @llvm.experimental.vp.strided.store.nxv3f32.p0.i32(, ptr , i32, , i32) - ; Splitting define void @strided_store_nxv16f64( %v, ptr %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_nxv16f64: @@ -603,8 +531,6 @@ define void @strided_store_nxv16f64_allones_mask( %v, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv16f64.p0.i32(, ptr, i32, , i32) - ; Widening + splitting (with HiIsEmpty == true) define void @strided_store_nxv17f64( %v, ptr %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_nxv17f64: @@ -658,4 +584,3 @@ define void @strided_store_nxv17f64( %v, ptr %ptr, i32 sig ret void } -declare void 
@llvm.experimental.vp.strided.store.nxv17f64.p0.i32(, ptr, i32, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir index a2cdd473163df..adfa39b71fd4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir @@ -19,11 +19,7 @@ ret %load } - ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn - declare @llvm.masked.load.nxv8i64.p0(ptr, i32 immarg, , ) #1 - attributes #0 = { nounwind "target-features"="+v" } - attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+v" } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll index 3ed437eeed2ff..4d642d913a64e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.smax.v4i16(, ) -declare @llvm.smin.v4i16(, ) -declare @llvm.smax.v4i32(, ) -declare @llvm.smin.v4i32(, ) -declare @llvm.smax.v4i64(, ) -declare @llvm.smin.v4i64(, ) - -declare @llvm.umin.v4i16(, ) -declare @llvm.umin.v4i32(, ) -declare @llvm.umin.v4i64(, ) - define void @trunc_sat_i8i16_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i8i16_maxmin: ; CHECK: # %bb.0: @@ -134,7 +123,6 @@ define void @trunc_sat_u8u16_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i16i32_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i16i32_notopt: ; CHECK: # %bb.0: @@ -261,7 +249,6 @@ define void @trunc_sat_u16u32_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i32i64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i32i64_notopt: ; CHECK: # %bb.0: @@ -317,7 +304,6 @@ define void 
@trunc_sat_i32i64_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_notopt: ; CHECK: # %bb.0: @@ -352,7 +338,6 @@ define void @trunc_sat_u32u64_min(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_maxmin: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll index 68e0c0089d0c7..4ac59b412b45c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.umul.with.overflow.nxv1i8(, ) - define @umulo_nxv1i8( %x, %y) { ; CHECK-LABEL: umulo_nxv1i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define @umulo_nxv1i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv2i8(, ) - define @umulo_nxv2i8( %x, %y) { ; CHECK-LABEL: umulo_nxv2i8: ; CHECK: # %bb.0: @@ -37,8 +33,6 @@ define @umulo_nxv2i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv4i8(, ) - define @umulo_nxv4i8( %x, %y) { ; CHECK-LABEL: umulo_nxv4i8: ; CHECK: # %bb.0: @@ -55,8 +49,6 @@ define @umulo_nxv4i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv8i8(, ) - define @umulo_nxv8i8( %x, %y) { ; CHECK-LABEL: umulo_nxv8i8: ; CHECK: # %bb.0: @@ -73,8 +65,6 @@ define @umulo_nxv8i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv16i8(, ) - define @umulo_nxv16i8( %x, %y) { ; CHECK-LABEL: umulo_nxv16i8: ; CHECK: # %bb.0: @@ -91,8 +81,6 @@ define @umulo_nxv16i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv32i8(, ) - define @umulo_nxv32i8( %x, %y) { ; CHECK-LABEL: umulo_nxv32i8: ; CHECK: # %bb.0: @@ -109,8 +97,6 @@ define @umulo_nxv32i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv64i8(, ) - define @umulo_nxv64i8( %x, %y) { ; 
CHECK-LABEL: umulo_nxv64i8: ; CHECK: # %bb.0: @@ -127,8 +113,6 @@ define @umulo_nxv64i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i16(, ) - define @umulo_nxv1i16( %x, %y) { ; CHECK-LABEL: umulo_nxv1i16: ; CHECK: # %bb.0: @@ -145,8 +129,6 @@ define @umulo_nxv1i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i16(, ) - define @umulo_nxv2i16( %x, %y) { ; CHECK-LABEL: umulo_nxv2i16: ; CHECK: # %bb.0: @@ -163,8 +145,6 @@ define @umulo_nxv2i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i16(, ) - define @umulo_nxv4i16( %x, %y) { ; CHECK-LABEL: umulo_nxv4i16: ; CHECK: # %bb.0: @@ -181,8 +161,6 @@ define @umulo_nxv4i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i16(, ) - define @umulo_nxv8i16( %x, %y) { ; CHECK-LABEL: umulo_nxv8i16: ; CHECK: # %bb.0: @@ -199,8 +177,6 @@ define @umulo_nxv8i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv16i16(, ) - define @umulo_nxv16i16( %x, %y) { ; CHECK-LABEL: umulo_nxv16i16: ; CHECK: # %bb.0: @@ -217,8 +193,6 @@ define @umulo_nxv16i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv32i16(, ) - define @umulo_nxv32i16( %x, %y) { ; CHECK-LABEL: umulo_nxv32i16: ; CHECK: # %bb.0: @@ -235,8 +209,6 @@ define @umulo_nxv32i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i32(, ) - define @umulo_nxv1i32( %x, %y) { ; CHECK-LABEL: umulo_nxv1i32: ; CHECK: # %bb.0: @@ -253,8 +225,6 @@ define @umulo_nxv1i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i32(, ) - define @umulo_nxv2i32( %x, %y) { ; CHECK-LABEL: umulo_nxv2i32: ; CHECK: # %bb.0: @@ -271,8 +241,6 @@ define @umulo_nxv2i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i32(, ) - define @umulo_nxv4i32( %x, %y) { ; CHECK-LABEL: umulo_nxv4i32: ; CHECK: # %bb.0: @@ -289,8 +257,6 @@ define @umulo_nxv4i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i32(, ) - define @umulo_nxv8i32( %x, %y) { ; CHECK-LABEL: umulo_nxv8i32: ; CHECK: # %bb.0: @@ -307,8 +273,6 @@ define @umulo_nxv8i32( %x, %d } -declare { , } 
@llvm.umul.with.overflow.nxv16i32(, ) - define @umulo_nxv16i32( %x, %y) { ; CHECK-LABEL: umulo_nxv16i32: ; CHECK: # %bb.0: @@ -325,8 +289,6 @@ define @umulo_nxv16i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i64(, ) - define @umulo_nxv1i64( %x, %y) { ; CHECK-LABEL: umulo_nxv1i64: ; CHECK: # %bb.0: @@ -343,8 +305,6 @@ define @umulo_nxv1i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i64(, ) - define @umulo_nxv2i64( %x, %y) { ; CHECK-LABEL: umulo_nxv2i64: ; CHECK: # %bb.0: @@ -361,8 +321,6 @@ define @umulo_nxv2i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i64(, ) - define @umulo_nxv4i64( %x, %y) { ; CHECK-LABEL: umulo_nxv4i64: ; CHECK: # %bb.0: @@ -379,8 +337,6 @@ define @umulo_nxv4i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i64(, ) - define @umulo_nxv8i64( %x, %y) { ; CHECK-LABEL: umulo_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll index 41d0b63285752..183cfdfdae626 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll @@ -146,15 +146,6 @@ loopIR3.i.i: ; preds = %loopIR3.i.i, %loopI br label %loopIR3.i.i } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare @llvm.riscv.vrgather.vx.nxv2f32.i64(, , i64, i64) #2 -declare void @llvm.riscv.vse.nxv2f32.i64(, ptr nocapture, i64) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) -declare @llvm.stepvector.nxv1i16() -declare @llvm.vector.insert.nxv8i16.nxv1i16(, , i64 immarg) -declare @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(, , , i64) - - define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) { ; CHECK-LABEL: repeat_shuffle: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir index 75e0539843ac5..d79539b819ac3 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir @@ -13,10 +13,7 @@ ret %0 } - declare @llvm.riscv.vrgather.vx.nxv2f32.i64(, , i64, i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nocallback nofree nosync nounwind willreturn memory(none) "target-features"="+v" } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll index de0c4e6b84c1f..8a66131f70954 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll @@ -6,8 +6,6 @@ ; Test that we can remove trivially-poison VP operations of various kinds. -declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32) - define <4 x i32> @vload_v4i32_zero_evl(ptr %ptr, <4 x i1> %m) { ; CHECK-LABEL: vload_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define <4 x i32> @vload_v4i32_false_mask(ptr %ptr, i32 %evl) { ret <4 x i32> %v } -declare <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @vgather_v4i32_v4i32_zero_evl(<4 x ptr> %ptrs, <4 x i1> %m) { ; CHECK-LABEL: vgather_v4i32_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -42,8 +38,6 @@ define <4 x i32> @vgather_v4i32_v4i32_false_mask(<4 x ptr> %ptrs, i32 %evl) { ret <4 x i32> %v } -declare void @llvm.vp.store.v4i32.p0(<4 x i32>, ptr, <4 x i1>, i32) - define void @vstore_v4i32_zero_evl(<4 x i32> %val, ptr %ptr, <4 x i1> %m) { ; CHECK-LABEL: vstore_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -60,8 +54,6 @@ define void @vstore_v4i32_false_mask(<4 x i32> %val, ptr %ptr, i32 %evl) { ret void } -declare void @llvm.vp.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, <4 x i1>, i32) - define void @vscatter_v4i32_zero_evl(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; CHECK-LABEL: vscatter_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -78,8 +70,6 @@ define void @vscatter_v4i32_false_mask(<4 x i32> %val, <4 x ptr> %ptrs, i32 %evl ret void } -declare <4 x i32> 
@llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vadd_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vadd_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -96,8 +86,6 @@ define <4 x i32> @vadd_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vand_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vand_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -114,8 +102,6 @@ define <4 x i32> @vand_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vlshr_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vlshr_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -132,8 +118,6 @@ define <4 x i32> @vlshr_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmul_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vmul_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -150,8 +134,6 @@ define <4 x i32> @vmul_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -168,8 +150,6 @@ define <4 x i32> @vor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsdiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsdiv_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -186,8 +166,6 @@ define <4 x i32> @vsdiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x 
i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsrem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsrem_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -204,8 +182,6 @@ define <4 x i32> @vsrem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsub_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsub_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -222,8 +198,6 @@ define <4 x i32> @vsub_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vudiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vudiv_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -240,8 +214,6 @@ define <4 x i32> @vudiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vurem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vurem_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -258,8 +230,6 @@ define <4 x i32> @vurem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vxor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vxor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -276,8 +246,6 @@ define <4 x i32> @vxor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfadd_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfadd_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -294,8 +262,6 @@ define <4 x float> @vfadd_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 
x float> %s } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsub_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfsub_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -312,8 +278,6 @@ define <4 x float> @vfsub_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmul_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfmul_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -330,8 +294,6 @@ define <4 x float> @vfmul_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfdiv_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfdiv_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -348,8 +310,6 @@ define <4 x float> @vfdiv_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrem_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfrem_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -366,8 +326,6 @@ define <4 x float> @vfrem_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_add_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_add_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -384,8 +342,6 @@ define i32 @vreduce_add_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.mul.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_mul_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_mul_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -402,8 +358,6 @@ define i32 
@vreduce_mul_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_and_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_and_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -420,8 +374,6 @@ define i32 @vreduce_and_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_or_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_or_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -438,8 +390,6 @@ define i32 @vreduce_or_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_xor_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_xor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -456,8 +406,6 @@ define i32 @vreduce_xor_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_smax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smax_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -474,8 +422,6 @@ define i32 @vreduce_smax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_smin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smin_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -492,8 +438,6 @@ define i32 @vreduce_smin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_umax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umax_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -510,8 +454,6 @@ define i32 
@vreduce_umax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_umin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umin_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -528,8 +470,6 @@ define i32 @vreduce_umin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_seq_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fadd_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -562,8 +502,6 @@ define float @vreduce_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmul.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_seq_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fmul_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -596,8 +534,6 @@ define float @vreduce_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmin.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_fmin_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmin_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -614,8 +550,6 @@ define float @vreduce_fmin_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmax.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_fmax_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmax_v4f32_zero_evl: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll index 8bed3c23078e8..a42baea6961cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -70,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -92,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -114,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -136,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -158,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -180,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -202,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -224,12 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -246,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -268,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -303,13 +224,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmadd.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -338,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -373,13 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -408,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -477,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -500,12 +372,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredsum.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -522,12 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -544,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -566,12 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -588,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -610,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -632,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -654,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv1i8( - , - , - , - iXLen); - define 
@intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -676,12 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -698,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -720,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -742,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -764,12 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -808,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -829,11 +611,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: @@ -851,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -875,13 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -899,8 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -923,8 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -936,12 +695,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vcompress_um_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll index 1735a0f5a1f2b..ab43ac7cf03a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_tu_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vlse( - , - ptr, - iXLen, - iXLen); - - define @intrinsic_vlse_v_tu( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_tu: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff( - , - ptr, - iXLen); - define @intrinsic_vleff_v_tu( %0, ptr %1, iXLen %2, ptr %3) nounwind { ; RV32-LABEL: intrinsic_vleff_v_tu: ; RV32: # %bb.0: # %entry @@ -79,12 +62,6 @@ entry: ret %b } -declare @llvm.riscv.vloxei.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -101,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -124,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -147,12 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -168,11 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i8.nxv1i8( - , - , - , - iXLen); define 
@intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: @@ -190,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -213,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -236,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -258,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -302,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -324,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -346,12 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -368,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -390,12 +290,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -412,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -434,12 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -456,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,12 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -500,12 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -522,12 +386,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -544,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -566,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -588,12 +434,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -611,12 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -633,12 +467,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -655,12 +483,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -677,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -699,13 +515,6 @@ entry: ret %a } - -declare @llvm.riscv.vslide1down.nxv1i64( - , - , - i64, - iXLen); - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -732,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -764,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -786,12 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -808,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -830,12 +615,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -852,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -874,12 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -896,12 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -918,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -940,13 +695,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -964,13 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-988,12 +729,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1010,12 +745,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1032,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1054,12 +777,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1076,17 +793,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i8.nxv1i8( - , - , - , - iXLen); -declare @llvm.riscv.vrgather.vv.nxv1i8.i32( - , - , - , - iXLen); - define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1103,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1125,12 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1147,12 +841,6 @@ entry: 
ret %a } -declare @llvm.riscv.vrsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1182,12 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1217,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1239,12 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1261,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1285,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.i64( - , - , - i64, - iXLen, - iXLen); - define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1323,12 +979,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1344,11 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( - , - , - , - iXLen); define 
@intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: @@ -1366,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1390,13 +1028,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1414,12 +1045,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1436,12 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1458,12 +1077,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1493,12 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1528,12 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1550,12 +1151,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1183,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1616,12 +1199,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1638,12 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1682,12 +1247,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1704,12 +1263,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1764,12 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1786,12 +1327,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1808,11 +1343,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1828,11 +1358,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1848,11 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1868,10 +1388,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.nxv1i8( - , - iXLen); - define @intrinsic_vid_v_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1886,11 +1402,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1909,11 +1420,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1929,11 +1435,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1949,11 +1450,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1969,11 +1465,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1989,11 +1480,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2009,11 +1495,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2029,11 +1510,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2049,11 +1525,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2069,11 +1540,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2089,11 +1555,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2109,11 +1570,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2129,11 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2149,11 +1600,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # 
%bb.0: # %entry @@ -2169,11 +1615,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2189,11 +1630,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2209,11 +1645,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2229,11 +1660,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2249,11 +1675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2269,11 +1690,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2289,11 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2309,11 +1720,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define 
@intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2329,11 +1735,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2349,11 +1750,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2369,11 +1765,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2389,11 +1780,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -2409,13 +1795,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2433,13 +1812,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2457,13 +1829,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2481,13 +1846,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2552,13 +1910,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -2576,13 +1927,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2600,13 +1944,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -2624,11 +1961,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2644,11 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2664,11 +1991,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -2695,11 +2017,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll index ba970e62875a9..bf6952dc78b0c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll @@ -1,11 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>) -declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>) -declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>) - define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; CHECK-LABEL: vec_v2i64: ; CHECK: # %bb.0: @@ -58,11 +53,6 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ret <16 x i8> %tmp } -declare @llvm.ushl.sat.nxv2i64(, ) -declare @llvm.ushl.sat.nxv4i32(, ) -declare @llvm.ushl.sat.nxv8i16(, ) -declare @llvm.ushl.sat.nxv16i8(, ) - define @vec_nxv2i64( %x, %y) nounwind { ; CHECK-LABEL: vec_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll index ba9bb84fe3608..6942169587c48 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 
@@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 
+496,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i8.i8( - , - , - i8, - iXLen, 
iXLen); - define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaadd.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # 
%entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define 
@intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll index aa3fa9a86f497..7fd02f99f618a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( - , - , - 
, - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaaddu.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ 
-1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret 
%a } -declare @llvm.riscv.vaaddu.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc.ll b/llvm/test/CodeGen/RISCV/rvv/vadc.ll index 6c7b81450f501..711893787819b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vadc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ 
entry: ret %a } -declare @llvm.riscv.vadc.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1000,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i64.i64( - , - , - i64, - 
, - iXLen); - define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,13 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll index 33eaee89d77c0..7800d9309bfa2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.add.nxv2i1(, , , i32) - define @vadd_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vadd_vv_nxv2i1( %va, %v } -declare @llvm.vp.add.nxv4i1(, , , i32) - define @vadd_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vadd_vv_nxv4i1( %va, %v } -declare @llvm.vp.add.nxv8i1(, , , i32) - define @vadd_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vadd_vv_nxv8i1( %va, %v } -declare @llvm.vp.add.nxv16i1(, , , i32) - define @vadd_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i1: ; CHECK: # %bb.0: @@ -53,8 +44,6 @@ define @vadd_vv_nxv16i1( %va, %v } -declare @llvm.vp.add.nxv32i1(, , , i32) - define @vadd_vv_nxv32i1( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vadd_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll index 946c0bbd7ff6f..c64b755051898 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.add.nxv8i7(, , , i32) - define @vadd_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vadd_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.add.nxv1i8(, , , i32) - define @vadd_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vadd_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv2i8(, , , i32) - define @vadd_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -162,8 +156,6 @@ define @vadd_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv3i8(, , , i32) - define @vadd_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv3i8: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vadd_vi_nxv3i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv4i8(, , , i32) - define @vadd_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -294,8 +284,6 @@ define @vadd_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv8i8(, , , i32) - define @vadd_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,8 +348,6 @@ define @vadd_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv16i8(, , , i32) - define @vadd_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -426,8 +412,6 @@ define @vadd_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare 
@llvm.vp.add.nxv32i8(, , , i32) - define @vadd_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define @vadd_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv64i8(, , , i32) - define @vadd_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -560,8 +542,6 @@ define @vadd_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.add.nxv128i8(, , , i32) - define @vadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv128i8: ; CHECK: # %bb.0: @@ -610,8 +590,6 @@ define @vadd_vi_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv1i16(, , , i32) - define @vadd_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -676,8 +654,6 @@ define @vadd_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i16(, , , i32) - define @vadd_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -742,8 +718,6 @@ define @vadd_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i16(, , , i32) - define @vadd_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -808,8 +782,6 @@ define @vadd_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i16(, , , i32) - define @vadd_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -874,8 +846,6 @@ define @vadd_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv16i16(, , , i32) - define @vadd_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -940,8 +910,6 @@ define @vadd_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv32i16(, , , i32) - define @vadd_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ 
-1006,8 +974,6 @@ define @vadd_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv1i32(, , , i32) - define @vadd_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1072,8 +1038,6 @@ define @vadd_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i32(, , , i32) - define @vadd_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1138,8 +1102,6 @@ define @vadd_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i32(, , , i32) - define @vadd_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1204,8 +1166,6 @@ define @vadd_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i32(, , , i32) - define @vadd_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1270,8 +1230,6 @@ define @vadd_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv16i32(, , , i32) - define @vadd_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1338,8 +1296,6 @@ define @vadd_vi_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.add.nxv32i32(, , , i32) - define @vadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1391,8 +1347,6 @@ define @vadd_vi_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vadd_vi_nxv32i32_evl_nx8( %va, %m) { ; RV32-LABEL: vadd_vi_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1454,8 +1408,6 @@ define @vadd_vi_nxv32i32_evl_nx16( %va, < ret %v } -declare @llvm.vp.add.nxv1i64(, , , i32) - define @vadd_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1548,8 +1500,6 @@ define @vadd_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i64(, , , i32) - define @vadd_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1642,8 +1592,6 @@ define @vadd_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i64(, , , i32) - define @vadd_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1736,8 +1684,6 @@ define @vadd_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i64(, , , i32) - define @vadd_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll index bdc62a974f098..8d0259a426d04 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.nxv2i8( - , 
- , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.nxv2i16( - , - , - , - iXLen); - 
define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } 
-declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 
+734,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1848,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1925,13 +1399,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1961,12 +1428,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1995,13 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2031,12 +1485,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2065,13 +1513,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1542,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,13 +1570,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll index 9d394a1ee3ff7..9dc0006b7a736 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesdf.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaesdf.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll index f21bdcac032f7..e94f3b102a093 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesdm.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv16i32: ; CHECK: # 
%bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll index ee11786583d7f..fff4d3a19753e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesef.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll index 65486e119842b..bb310fe61ce34 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesem.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll index 94eb803169ce9..5ea5b5eb4f601 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaeskf1.nxv4i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf1.nxv8i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf1.nxv16i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll index 5abe0821d2299..08f22b58b9f59 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaeskf2.nxv4i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv4i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf2.nxv8i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf2.nxv16i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll index 
2453119ce92d3..6a0b9f52c2b4b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesz.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesz.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesz.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll index 4866bb06f19ec..eaa272f425086 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.and.nxv8i7(, , , i32) - define @vand_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vand_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.and.nxv1i8(, , , i32) - define @vand_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vand_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv2i8(, , , i32) - define @vand_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +144,6 @@ define @vand_vi_nxv2i8_unmasked( %va, i32 zer 
ret %v } -declare @llvm.vp.and.nxv4i8(, , , i32) - define @vand_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i8: ; CHECK: # %bb.0: @@ -216,8 +208,6 @@ define @vand_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv8i8(, , , i32) - define @vand_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i8: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vand_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv16i8(, , , i32) - define @vand_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i8: ; CHECK: # %bb.0: @@ -348,8 +336,6 @@ define @vand_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv32i8(, , , i32) - define @vand_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i8: ; CHECK: # %bb.0: @@ -414,8 +400,6 @@ define @vand_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv64i8(, , , i32) - define @vand_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv64i8: ; CHECK: # %bb.0: @@ -480,8 +464,6 @@ define @vand_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv1i16(, , , i32) - define @vand_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i16: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define @vand_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i16(, , , i32) - define @vand_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i16: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vand_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i16(, , , i32) - define @vand_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i16: ; CHECK: # %bb.0: @@ -678,8 +656,6 @@ define @vand_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i16(, , , i32) - define @vand_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i16: ; CHECK: # %bb.0: @@ -744,8 +720,6 @@ define @vand_vi_nxv8i16_unmasked( 
%va, i32 ret %v } -declare @llvm.vp.and.nxv14i16(, , , i32) - define @vand_vv_nxv14i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv14i16: ; CHECK: # %bb.0: @@ -810,8 +784,6 @@ define @vand_vi_nxv14i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv16i16(, , , i32) - define @vand_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i16: ; CHECK: # %bb.0: @@ -876,8 +848,6 @@ define @vand_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv32i16(, , , i32) - define @vand_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i16: ; CHECK: # %bb.0: @@ -954,8 +924,6 @@ define @vand_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv1i32(, , , i32) - define @vand_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1020,8 +988,6 @@ define @vand_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i32(, , , i32) - define @vand_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1086,8 +1052,6 @@ define @vand_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i32(, , , i32) - define @vand_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1152,8 +1116,6 @@ define @vand_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i32(, , , i32) - define @vand_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1218,8 +1180,6 @@ define @vand_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv16i32(, , , i32) - define @vand_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1284,8 +1244,6 @@ define @vand_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv1i64(, , , i32) - define @vand_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1378,8 +1336,6 @@ define 
@vand_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i64(, , , i32) - define @vand_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1472,8 +1428,6 @@ define @vand_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i64(, , , i32) - define @vand_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1566,8 +1520,6 @@ define @vand_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i64(, , , i32) - define @vand_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vand.ll b/llvm/test/CodeGen/RISCV/rvv/vand.ll index fafc25e2a5819..d1c8714a6abdc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vand.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i64.nxv2i64( - , - 
, - , - iXLen); - define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, 
- iXLen); - define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vand_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll index f295bd8d74df3..fe477d8a6f8f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll @@ -2234,8 +2234,6 @@ identity: ret %x } -declare i64 @llvm.vscale.i64() - define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) { ; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not: ; CHECK-RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll index 5d29b266546f5..c08e3d695691b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32 ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64 -declare @llvm.vp.and.nxv1i8(, , , i32) -declare @llvm.vp.xor.nxv1i8(, , , i32) - define @vandn_vv_vp_nxv1i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i8: ; CHECK: # %bb.0: @@ -63,9 +60,6 @@ define @vandn_vx_vp_nxv1i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv2i8(, , , i32) -declare @llvm.vp.xor.nxv2i8(, , , i32) - define @vandn_vv_vp_nxv2i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i8: ; CHECK: # %bb.0: @@ -122,9 +116,6 @@ define @vandn_vx_vp_nxv2i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv4i8(, , , i32) -declare @llvm.vp.xor.nxv4i8(, , , i32) - define @vandn_vv_vp_nxv4i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i8: ; CHECK: # %bb.0: @@ -181,9 +172,6 @@ define @vandn_vx_vp_nxv4i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv8i8(, , , i32) -declare @llvm.vp.xor.nxv8i8(, , , i32) - define @vandn_vv_vp_nxv8i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: 
vandn_vv_vp_nxv8i8: ; CHECK: # %bb.0: @@ -240,9 +228,6 @@ define @vandn_vx_vp_nxv8i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv16i8(, , , i32) -declare @llvm.vp.xor.nxv16i8(, , , i32) - define @vandn_vv_vp_nxv16i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i8: ; CHECK: # %bb.0: @@ -299,9 +284,6 @@ define @vandn_vx_vp_nxv16i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv32i8(, , , i32) -declare @llvm.vp.xor.nxv32i8(, , , i32) - define @vandn_vv_vp_nxv32i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv32i8: ; CHECK: # %bb.0: @@ -358,9 +340,6 @@ define @vandn_vx_vp_nxv32i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv64i8(, , , i32) -declare @llvm.vp.xor.nxv64i8(, , , i32) - define @vandn_vv_vp_nxv64i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv64i8: ; CHECK: # %bb.0: @@ -417,9 +396,6 @@ define @vandn_vx_vp_nxv64i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv1i16(, , , i32) -declare @llvm.vp.xor.nxv1i16(, , , i32) - define @vandn_vv_vp_nxv1i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i16: ; CHECK: # %bb.0: @@ -476,9 +452,6 @@ define @vandn_vx_vp_nxv1i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv2i16(, , , i32) -declare @llvm.vp.xor.nxv2i16(, , , i32) - define @vandn_vv_vp_nxv2i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i16: ; CHECK: # %bb.0: @@ -535,9 +508,6 @@ define @vandn_vx_vp_nxv2i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv4i16(, , , i32) -declare @llvm.vp.xor.nxv4i16(, , , i32) - define @vandn_vv_vp_nxv4i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i16: ; CHECK: # %bb.0: @@ -594,9 +564,6 @@ define @vandn_vx_vp_nxv4i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv8i16(, , , i32) -declare @llvm.vp.xor.nxv8i16(, , , i32) - define @vandn_vv_vp_nxv8i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i16: ; CHECK: # %bb.0: @@ -653,9 +620,6 @@ define @vandn_vx_vp_nxv8i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv16i16(, 
, , i32) -declare @llvm.vp.xor.nxv16i16(, , , i32) - define @vandn_vv_vp_nxv16i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i16: ; CHECK: # %bb.0: @@ -712,9 +676,6 @@ define @vandn_vx_vp_nxv16i16(i16 %a, %b, ret %x } -declare @llvm.vp.and.nxv32i16(, , , i32) -declare @llvm.vp.xor.nxv32i16(, , , i32) - define @vandn_vv_vp_nxv32i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv32i16: ; CHECK: # %bb.0: @@ -771,9 +732,6 @@ define @vandn_vx_vp_nxv32i16(i16 %a, %b, ret %x } -declare @llvm.vp.and.nxv1i32(, , , i32) -declare @llvm.vp.xor.nxv1i32(, , , i32) - define @vandn_vv_vp_nxv1i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i32: ; CHECK: # %bb.0: @@ -830,9 +788,6 @@ define @vandn_vx_vp_nxv1i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv2i32(, , , i32) -declare @llvm.vp.xor.nxv2i32(, , , i32) - define @vandn_vv_vp_nxv2i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i32: ; CHECK: # %bb.0: @@ -889,9 +844,6 @@ define @vandn_vx_vp_nxv2i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv4i32(, , , i32) -declare @llvm.vp.xor.nxv4i32(, , , i32) - define @vandn_vv_vp_nxv4i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i32: ; CHECK: # %bb.0: @@ -948,9 +900,6 @@ define @vandn_vx_vp_nxv4i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv8i32(, , , i32) -declare @llvm.vp.xor.nxv8i32(, , , i32) - define @vandn_vv_vp_nxv8i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i32: ; CHECK: # %bb.0: @@ -1007,9 +956,6 @@ define @vandn_vx_vp_nxv8i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv16i32(, , , i32) -declare @llvm.vp.xor.nxv16i32(, , , i32) - define @vandn_vv_vp_nxv16i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i32: ; CHECK: # %bb.0: @@ -1066,9 +1012,6 @@ define @vandn_vx_vp_nxv16i32(i32 %a, %b, ret %x } -declare @llvm.vp.and.nxv1i64(, , , i32) -declare @llvm.vp.xor.nxv1i64(, , , i32) - define @vandn_vv_vp_nxv1i64( %a, %b, %mask, i32 
zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i64: ; CHECK: # %bb.0: @@ -1157,9 +1100,6 @@ define @vandn_vx_vp_nxv1i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv2i64(, , , i32) -declare @llvm.vp.xor.nxv2i64(, , , i32) - define @vandn_vv_vp_nxv2i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i64: ; CHECK: # %bb.0: @@ -1248,9 +1188,6 @@ define @vandn_vx_vp_nxv2i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv4i64(, , , i32) -declare @llvm.vp.xor.nxv4i64(, , , i32) - define @vandn_vv_vp_nxv4i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i64: ; CHECK: # %bb.0: @@ -1339,9 +1276,6 @@ define @vandn_vx_vp_nxv4i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv8i64(, , , i32) -declare @llvm.vp.xor.nxv8i64(, , , i32) - define @vandn_vv_vp_nxv8i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn.ll b/llvm/test/CodeGen/RISCV/rvv/vandn.ll index b346207a5339d..88a51658e4ef3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vandn.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ 
-73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i8.nxv32i8( - , - , - , - iXLen) - 
define @intrinsic_vandn_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ 
-591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare 
@llvm.riscv.vandn.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define 
@intrinsic_vandn_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i8.i8( - , - , - i8, - iXLen) - define 
@intrinsic_vandn_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i16.i16( - , - , 
- i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vandn_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 
@@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll index 1dfba884d9404..2b7f8dbd34cf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } 
-declare @llvm.riscv.vasub.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret 
%a } -declare @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret 
%a } -declare @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define 
@intrinsic_vasub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.i16( - , - , - 
i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll index 24fa668f7955e..c96a467bb425a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare 
@llvm.riscv.vasubu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; 
CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64( 
- , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: 
ret %a } -declare @llvm.riscv.vasubu.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a 
} -declare @llvm.riscv.vasubu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define 
@intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare 
@llvm.riscv.vasubu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll index d8a98945e1192..57ba17ea78e99 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vbrev.nxv1i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i8( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv32i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv64i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vbrev_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i16: ; CHECK: # %bb.0: # 
%entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv32i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare 
@llvm.riscv.vbrev.nxv8i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i64( - , - , - iXLen); - define 
@intrinsic_vbrev_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll index 77ee4b1ac14a8..0edcf2417cd74 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vbrev8.nxv1i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i8( - , - , - , 
- iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv32i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vbrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv64i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv32i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vbrev8_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i64: ; 
CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll index 5452191de30a3..8ea8edf218385 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vclmul.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -227,14 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -264,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -298,14 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -335,12 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -369,14 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv4i64.i64( - , - , - i64, - , - 
iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -406,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -440,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll index b1acb6d1ca3d9..e5b09cc067c5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vclmulh.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - 
iXLen) - define @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -227,14 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -264,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv2i64_i64( %0, i64 %1, 
iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -298,14 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -335,12 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -369,14 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -406,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -440,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclz.ll b/llvm/test/CodeGen/RISCV/rvv/vclz.ll index 8e651fb3aa201..cea7523a450c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vclz.nxv1i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i8: ; 
CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i8( - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv32i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv64i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv32i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i16: ; CHECK: # %bb.0: 
# %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i32( - , - 
, - iXLen); - define @intrinsic_vclz_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vclz_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll index 5ee82e6d95d4d..faf3a8eac0aac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vcompress.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -70,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -92,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -114,12 +84,6 @@ entry: ret %a } 
-declare @llvm.riscv.vcompress.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -136,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -158,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -180,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -202,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -224,12 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -246,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -268,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -290,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -312,12 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -334,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -356,12 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -378,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -400,12 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -422,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -444,12 +324,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcompress.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -466,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -488,12 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -510,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -532,12 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -576,12 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -598,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -620,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -642,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -664,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -686,12 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -708,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -730,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -752,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -774,12 +564,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcompress.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -796,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -818,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -840,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -862,12 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -884,12 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -906,12 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -928,12 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16: ; CHECK: # %bb.0: # %entry diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll index 2f5fde3bb3b20..b6ebe3ff2556e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll @@ -372,8 +372,6 @@ define @vfsgnj_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.copysign.nxv1f16(, , , i32) - define @vfsgnj_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -434,8 +432,6 @@ define @vfsgnj_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv2f16(, , , i32) - define @vfsgnj_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -496,8 +492,6 @@ define @vfsgnj_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv4f16(, , , i32) - define @vfsgnj_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -558,8 +552,6 @@ define @vfsgnj_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv8f16(, , , i32) - define @vfsgnj_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -620,8 +612,6 @@ define @vfsgnj_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv16f16(, , , i32) - define @vfsgnj_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -682,8 +672,6 @@ define @vfsgnj_vv_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.copysign.nxv32f16(, , , i32) - define @vfsgnj_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -744,8 +732,6 @@ define @vfsgnj_vv_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.copysign.nxv1f32(, , , i32) - define @vfsgnj_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f32: ; CHECK: # %bb.0: @@ -766,8 +752,6 @@ define @vfsgnj_vv_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv2f32(, , , i32) - define @vfsgnj_vv_nxv2f32( %va, %vb, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f32: ; CHECK: # %bb.0: @@ -788,8 +772,6 @@ define @vfsgnj_vv_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv4f32(, , , i32) - define @vfsgnj_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f32: ; CHECK: # %bb.0: @@ -810,8 +792,6 @@ define @vfsgnj_vv_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv8f32(, , , i32) - define @vfsgnj_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f32: ; CHECK: # %bb.0: @@ -832,8 +812,6 @@ define @vfsgnj_vv_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv16f32(, , , i32) - define @vfsgnj_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv16f32: ; CHECK: # %bb.0: @@ -854,8 +832,6 @@ define @vfsgnj_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.copysign.nxv1f64(, , , i32) - define @vfsgnj_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f64: ; CHECK: # %bb.0: @@ -876,8 +852,6 @@ define @vfsgnj_vv_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv2f64(, , , i32) - define @vfsgnj_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f64: ; CHECK: # %bb.0: @@ -898,8 +872,6 @@ define @vfsgnj_vv_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv4f64(, , , i32) - define @vfsgnj_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f64: ; CHECK: # %bb.0: @@ -920,8 +892,6 @@ define @vfsgnj_vv_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv8f64(, , , i32) - define @vfsgnj_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll index 6b35e4767b239..6b85cd3e33054 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -35,11 +31,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -71,10 +62,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv2i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -89,11 +76,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -111,10 +93,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv4i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -129,11 +107,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -151,10 +124,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv8i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -169,11 +138,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ 
-191,10 +155,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv16i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -209,11 +169,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -231,10 +186,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv32i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -249,11 +200,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -271,10 +217,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv64i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -289,11 +231,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll index 0429bcd93c1b7..6c37679836e28 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vcpopv.nxv1i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i8( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i8: ; CHECK: # 
%bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv32i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv64i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ 
entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv32i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcpopv.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i32( - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - 
define @intrinsic_vcpopv_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vctz.ll b/llvm/test/CodeGen/RISCV/rvv/vctz.ll index 67cd5d5430e38..e1b4915fa7dd4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vctz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vctz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vctz.nxv1i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i8( - , - , - iXLen); - 
define @intrinsic_vctz_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv32i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv64i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vctz_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret 
%a } -declare @llvm.riscv.vctz.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv32i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i32( - , - , - , - iXLen, - iXLen); 
- define @intrinsic_vctz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll index 03e4e1f445bee..e2b26ce9d1810 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sdiv.nxv8i7(, , , i32) - define @vdiv_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vdiv_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.sdiv.nxv1i8(, , , i32) - define @vdiv_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i8: ; CHECK: # %bb.0: @@ -69,8 +65,6 @@ define @vdiv_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv2i8(, , , i32) - define @vdiv_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i8: ; CHECK: # %bb.0: @@ -115,8 +109,6 @@ define @vdiv_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv3i8(, , , i32) - define @vdiv_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv3i8: ; CHECK: # %bb.0: @@ -127,8 +119,6 @@ define @vdiv_vv_nxv3i8( %va, %v } -declare @llvm.vp.sdiv.nxv4i8(, , , i32) - define @vdiv_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i8: ; CHECK: # %bb.0: @@ -173,8 +163,6 @@ define @vdiv_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv8i8(, , , i32) - define @vdiv_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i8: ; CHECK: # %bb.0: @@ -219,8 +207,6 @@ define @vdiv_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv16i8(, , , i32) - define @vdiv_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i8: ; CHECK: # %bb.0: @@ -265,8 +251,6 @@ define @vdiv_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv32i8(, , , i32) - define @vdiv_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i8: ; CHECK: # %bb.0: @@ -311,8 +295,6 @@ define @vdiv_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv64i8(, , , i32) - define @vdiv_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv64i8: ; CHECK: # %bb.0: @@ -357,8 +339,6 @@ define @vdiv_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv1i16(, , , i32) - define @vdiv_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vv_nxv1i16: ; CHECK: # %bb.0: @@ -403,8 +383,6 @@ define @vdiv_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv2i16(, , , i32) - define @vdiv_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i16: ; CHECK: # %bb.0: @@ -449,8 +427,6 @@ define @vdiv_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv4i16(, , , i32) - define @vdiv_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i16: ; CHECK: # %bb.0: @@ -495,8 +471,6 @@ define @vdiv_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv8i16(, , , i32) - define @vdiv_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i16: ; CHECK: # %bb.0: @@ -541,8 +515,6 @@ define @vdiv_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv16i16(, , , i32) - define @vdiv_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i16: ; CHECK: # %bb.0: @@ -587,8 +559,6 @@ define @vdiv_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv32i16(, , , i32) - define @vdiv_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i16: ; CHECK: # %bb.0: @@ -633,8 +603,6 @@ define @vdiv_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv1i32(, , , i32) - define @vdiv_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i32: ; CHECK: # %bb.0: @@ -679,8 +647,6 @@ define @vdiv_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv2i32(, , , i32) - define @vdiv_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i32: ; CHECK: # %bb.0: @@ -725,8 +691,6 @@ define @vdiv_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv4i32(, , , i32) - define @vdiv_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i32: ; CHECK: # %bb.0: @@ -771,8 +735,6 @@ define @vdiv_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv8i32(, , , i32) - define @vdiv_vv_nxv8i32( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vdiv_vv_nxv8i32: ; CHECK: # %bb.0: @@ -817,8 +779,6 @@ define @vdiv_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv16i32(, , , i32) - define @vdiv_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i32: ; CHECK: # %bb.0: @@ -863,8 +823,6 @@ define @vdiv_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv1i64(, , , i32) - define @vdiv_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i64: ; CHECK: # %bb.0: @@ -937,8 +895,6 @@ define @vdiv_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv2i64(, , , i32) - define @vdiv_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1011,8 +967,6 @@ define @vdiv_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv4i64(, , , i32) - define @vdiv_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1085,8 +1039,6 @@ define @vdiv_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv8i64(, , , i32) - define @vdiv_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll index 122ebe50704ab..b2a7f27cb23bf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: 
# %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( - , - 
, - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, 
i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; 
RV32-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll index 2f35f91d77a4e..de278dc2e748d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.udiv.nxv8i7(, , , i32) - define @vdivu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vdivu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.udiv.nxv1i8(, , , i32) - define @vdivu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -68,8 +64,6 @@ define @vdivu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv2i8(, , , i32) - define @vdivu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +108,6 @@ define @vdivu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv3i8(, , , i32) - define @vdivu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vdivu_vv_nxv3i8( %va, %v } -declare @llvm.vp.udiv.nxv4i8(, , , i32) - define @vdivu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vdivu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vdivu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv8i8(, , , i32) - define @vdivu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -218,8 +206,6 @@ define @vdivu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv16i8(, , , i32) - define @vdivu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -264,8 +250,6 @@ define @vdivu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv32i8(, , , i32) - define @vdivu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -310,8 +294,6 @@ define @vdivu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv64i8(, , , i32) - define @vdivu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -356,8 +338,6 @@ define @vdivu_vx_nxv64i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv1i16(, , , i32) - define @vdivu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -402,8 +382,6 @@ define @vdivu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv2i16(, , , i32) - define @vdivu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ define @vdivu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv4i16(, , , i32) - define @vdivu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -494,8 +470,6 @@ define @vdivu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv8i16(, , , i32) - define @vdivu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -540,8 +514,6 @@ define @vdivu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv16i16(, , , i32) - define 
@vdivu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -586,8 +558,6 @@ define @vdivu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv32i16(, , , i32) - define @vdivu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -632,8 +602,6 @@ define @vdivu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv1i32(, , , i32) - define @vdivu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -678,8 +646,6 @@ define @vdivu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv2i32(, , , i32) - define @vdivu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -724,8 +690,6 @@ define @vdivu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv4i32(, , , i32) - define @vdivu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -770,8 +734,6 @@ define @vdivu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv8i32(, , , i32) - define @vdivu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -816,8 +778,6 @@ define @vdivu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv16i32(, , , i32) - define @vdivu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -862,8 +822,6 @@ define @vdivu_vx_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv1i64(, , , i32) - define @vdivu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -936,8 +894,6 @@ define @vdivu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.udiv.nxv2i64(, , , i32) - define @vdivu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1010,8 +966,6 @@ define @vdivu_vx_nxv2i64_unmasked( %va, i64 ret %v } 
-declare @llvm.vp.udiv.nxv4i64(, , , i32) - define @vdivu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1084,8 +1038,6 @@ define @vdivu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.udiv.nxv8i64(, , , i32) - define @vdivu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll index af05f09293546..847738f0dc140 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define 
@intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare 
@llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # 
%bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # 
%bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # 
%bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) 
nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll index 1df4076aa2069..a3d634f1d7591 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll @@ -322,15 +322,3 @@ define double @extract_last_double_scalable( %data, , <16 x i1>, i8) -declare i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16>, <8 x i1>, i16) -declare i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32>, <4 x i1>, i32) -declare i64 @llvm.experimental.vector.extract.last.active.v2i64(<2 x i64>, <2 x i1>, i64) -declare float @llvm.experimental.vector.extract.last.active.v4f32(<4 x float>, <4 x i1>, float) -declare double @llvm.experimental.vector.extract.last.active.v2f64(<2 x double>, <2 x i1>, double) -declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(, , i8) -declare i16 @llvm.experimental.vector.extract.last.active.nxv8i16(, , i16) -declare i32 @llvm.experimental.vector.extract.last.active.nxv4i32(, , i32) -declare i64 @llvm.experimental.vector.extract.last.active.nxv2i64(, , i64) -declare float @llvm.experimental.vector.extract.last.active.nxv4f32(, , float) -declare double @llvm.experimental.vector.extract.last.active.nxv2f64(, , double) diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll index d132be96775ac..abe8e173b636f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll @@ -1,31 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - i32) - -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - 
, - , - , - i32, i32) - -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - i32) - -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - i32) - define @simple_vadd_vv( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: simple_vadd_vv: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll index 677a0aa712b5d..e3f43cd904198 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -10,8 +10,6 @@ ; Tests assume VLEN=128 or vscale_range_min=2. -declare @llvm.vector.splice.nxv1i1(, , i32) - define @splice_nxv1i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv1i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -96,8 +94,6 @@ define @splice_nxv1i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i1(, , i32) - define @splice_nxv2i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv2i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -182,8 +178,6 @@ define @splice_nxv2i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i1(, , i32) - define @splice_nxv4i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv4i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -268,8 +262,6 @@ define @splice_nxv4i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i1(, , i32) - define @splice_nxv8i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv8i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -350,8 +342,6 @@ define @splice_nxv8i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i1(, , i32) - define @splice_nxv16i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv16i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -436,8 +426,6 @@ define @splice_nxv16i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv32i1(, , i32) - define @splice_nxv32i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv32i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -522,8 +510,6 @@ define @splice_nxv32i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv64i1(, , i32) - define 
@splice_nxv64i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv64i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -608,8 +594,6 @@ define @splice_nxv64i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1i8(, , i32) - define @splice_nxv1i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i8_offset_zero: ; CHECK: # %bb.0: @@ -693,8 +677,6 @@ define @splice_nxv1i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i8(, , i32) - define @splice_nxv2i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i8_offset_zero: ; CHECK: # %bb.0: @@ -778,8 +760,6 @@ define @splice_nxv2i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i8(, , i32) - define @splice_nxv4i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i8_offset_zero: ; CHECK: # %bb.0: @@ -863,8 +843,6 @@ define @splice_nxv4i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i8(, , i32) - define @splice_nxv8i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i8_offset_zero: ; CHECK: # %bb.0: @@ -942,8 +920,6 @@ define @splice_nxv8i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i8(, , i32) - define @splice_nxv16i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i8_offset_zero: ; CHECK: # %bb.0: @@ -1029,8 +1005,6 @@ define @splice_nxv16i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv32i8(, , i32) - define @splice_nxv32i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i8_offset_zero: ; CHECK: # %bb.0: @@ -1118,8 +1092,6 @@ define @splice_nxv32i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv64i8(, , i32) - define @splice_nxv64i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv64i8_offset_zero: ; CHECK: # %bb.0: @@ -1207,8 +1179,6 @@ define @splice_nxv64i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1i16(, , i32) - define @splice_nxv1i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i16_offset_zero: ; CHECK: # %bb.0: @@ -1292,8 +1262,6 @@ define @splice_nxv1i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i16(, 
, i32) - define @splice_nxv2i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i16_offset_zero: ; CHECK: # %bb.0: @@ -1377,8 +1345,6 @@ define @splice_nxv2i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i16(, , i32) - define @splice_nxv4i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i16_offset_zero: ; CHECK: # %bb.0: @@ -1462,8 +1428,6 @@ define @splice_nxv4i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i16(, , i32) - define @splice_nxv8i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i16_offset_zero: ; CHECK: # %bb.0: @@ -1541,8 +1505,6 @@ define @splice_nxv8i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i16(, , i32) - define @splice_nxv16i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i16_offset_zero: ; CHECK: # %bb.0: @@ -1628,8 +1590,6 @@ define @splice_nxv16i16_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv32i16(, , i32) - define @splice_nxv32i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i16_offset_zero: ; CHECK: # %bb.0: @@ -1717,8 +1677,6 @@ define @splice_nxv32i16_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv1i32(, , i32) - define @splice_nxv1i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i32_offset_zero: ; CHECK: # %bb.0: @@ -1802,8 +1760,6 @@ define @splice_nxv1i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i32(, , i32) - define @splice_nxv2i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i32_offset_zero: ; CHECK: # %bb.0: @@ -1887,8 +1843,6 @@ define @splice_nxv2i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i32(, , i32) - define @splice_nxv4i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i32_offset_zero: ; CHECK: # %bb.0: @@ -1972,8 +1926,6 @@ define @splice_nxv4i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i32(, , i32) - define @splice_nxv8i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i32_offset_zero: ; CHECK: # %bb.0: @@ -2051,8 +2003,6 @@ define 
@splice_nxv8i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i32(, , i32) - define @splice_nxv16i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i32_offset_zero: ; CHECK: # %bb.0: @@ -2138,8 +2088,6 @@ define @splice_nxv16i32_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv1i64(, , i32) - define @splice_nxv1i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i64_offset_zero: ; CHECK: # %bb.0: @@ -2223,8 +2171,6 @@ define @splice_nxv1i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i64(, , i32) - define @splice_nxv2i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i64_offset_zero: ; CHECK: # %bb.0: @@ -2308,8 +2254,6 @@ define @splice_nxv2i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i64(, , i32) - define @splice_nxv4i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i64_offset_zero: ; CHECK: # %bb.0: @@ -2393,8 +2337,6 @@ define @splice_nxv4i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i64(, , i32) - define @splice_nxv8i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i64_offset_zero: ; CHECK: # %bb.0: @@ -2472,8 +2414,6 @@ define @splice_nxv8i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1bf16(, , i32) - define @splice_nxv1bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1bf16_offset_zero: ; CHECK: # %bb.0: @@ -2557,8 +2497,6 @@ define @splice_nxv1bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv2bf16(, , i32) - define @splice_nxv2bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2bf16_offset_zero: ; CHECK: # %bb.0: @@ -2642,8 +2580,6 @@ define @splice_nxv2bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv4bf16(, , i32) - define @splice_nxv4bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4bf16_offset_zero: ; CHECK: # %bb.0: @@ -2727,8 +2663,6 @@ define @splice_nxv4bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv8bf16(, , i32) - define @splice_nxv8bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: 
splice_nxv8bf16_offset_zero: ; CHECK: # %bb.0: @@ -2806,8 +2740,6 @@ define @splice_nxv8bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv16bf16(, , i32) - define @splice_nxv16bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16bf16_offset_zero: ; CHECK: # %bb.0: @@ -2893,8 +2825,6 @@ define @splice_nxv16bf16_offset_max( %res } -declare @llvm.vector.splice.nxv32bf16(, , i32) - define @splice_nxv32bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32bf16_offset_zero: ; CHECK: # %bb.0: @@ -2982,8 +2912,6 @@ define @splice_nxv32bf16_offset_max( %res } -declare @llvm.vector.splice.nxv1f16(, , i32) - define @splice_nxv1f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f16_offset_zero: ; CHECK: # %bb.0: @@ -3067,8 +2995,6 @@ define @splice_nxv1f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2f16(, , i32) - define @splice_nxv2f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f16_offset_zero: ; CHECK: # %bb.0: @@ -3152,8 +3078,6 @@ define @splice_nxv2f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4f16(, , i32) - define @splice_nxv4f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f16_offset_zero: ; CHECK: # %bb.0: @@ -3237,8 +3161,6 @@ define @splice_nxv4f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8f16(, , i32) - define @splice_nxv8f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f16_offset_zero: ; CHECK: # %bb.0: @@ -3316,8 +3238,6 @@ define @splice_nxv8f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16f16(, , i32) - define @splice_nxv16f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16f16_offset_zero: ; CHECK: # %bb.0: @@ -3403,8 +3323,6 @@ define @splice_nxv16f16_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv32f16(, , i32) - define @splice_nxv32f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32f16_offset_zero: ; CHECK: # %bb.0: @@ -3492,8 +3410,6 @@ define @splice_nxv32f16_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv1f32(, , i32) - 
define @splice_nxv1f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f32_offset_zero: ; CHECK: # %bb.0: @@ -3577,8 +3493,6 @@ define @splice_nxv1f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv2f32(, , i32) - define @splice_nxv2f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f32_offset_zero: ; CHECK: # %bb.0: @@ -3662,8 +3576,6 @@ define @splice_nxv2f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv4f32(, , i32) - define @splice_nxv4f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f32_offset_zero: ; CHECK: # %bb.0: @@ -3747,8 +3659,6 @@ define @splice_nxv4f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv8f32(, , i32) - define @splice_nxv8f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f32_offset_zero: ; CHECK: # %bb.0: @@ -3826,8 +3736,6 @@ define @splice_nxv8f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv16f32(, , i32) - define @splice_nxv16f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16f32_offset_zero: ; CHECK: # %bb.0: @@ -3913,8 +3821,6 @@ define @splice_nxv16f32_offset_max( % ret %res } -declare @llvm.vector.splice.nxv1f64(, , i32) - define @splice_nxv1f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f64_offset_zero: ; CHECK: # %bb.0: @@ -3998,8 +3904,6 @@ define @splice_nxv1f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv2f64(, , i32) - define @splice_nxv2f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f64_offset_zero: ; CHECK: # %bb.0: @@ -4083,8 +3987,6 @@ define @splice_nxv2f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv4f64(, , i32) - define @splice_nxv4f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f64_offset_zero: ; CHECK: # %bb.0: @@ -4168,8 +4070,6 @@ define @splice_nxv4f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv8f64(, , i32) - define @splice_nxv8f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f64_offset_zero: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll index 37a50e1539982..831912fb61fb5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) - define target("riscv.vector.tuple", , 2) @test_vlseg_nxv8i8(ptr %p, i64 %vl) { ; CHECK-LABEL: name: test_vlseg_nxv8i8 ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll index 28426ad018b83..a7874d1f519fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -174,8 +174,6 @@ define @nxv32bf16( %v) { ret %r } -declare @llvm.fabs.nxv1f16() - define @vfabs_nxv1f16( %v) { ; ZVFH-LABEL: vfabs_nxv1f16: ; ZVFH: # %bb.0: @@ -202,8 +200,6 @@ define @vfabs_nxv1f16( %v) { ret %r } -declare @llvm.fabs.nxv2f16() - define @vfabs_nxv2f16( %v) { ; ZVFH-LABEL: vfabs_nxv2f16: ; ZVFH: # %bb.0: @@ -230,8 +226,6 @@ define @vfabs_nxv2f16( %v) { ret %r } -declare @llvm.fabs.nxv4f16() - define @vfabs_nxv4f16( %v) { ; ZVFH-LABEL: vfabs_nxv4f16: ; ZVFH: # %bb.0: @@ -258,8 +252,6 @@ define @vfabs_nxv4f16( %v) { ret %r } -declare @llvm.fabs.nxv8f16() - define @vfabs_nxv8f16( %v) { ; ZVFH-LABEL: vfabs_nxv8f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define @vfabs_nxv8f16( %v) { ret %r } -declare @llvm.fabs.nxv16f16() - define @vfabs_nxv16f16( %v) { ; ZVFH-LABEL: vfabs_nxv16f16: ; ZVFH: # %bb.0: @@ -314,8 +304,6 @@ define @vfabs_nxv16f16( %v) { ret %r } -declare @llvm.fabs.nxv32f16() - define @vfabs_nxv32f16( %v) { ; ZVFH-LABEL: vfabs_nxv32f16: ; ZVFH: # %bb.0: @@ -342,8 +330,6 @@ 
define @vfabs_nxv32f16( %v) { ret %r } -declare @llvm.fabs.nxv1f32() - define @vfabs_nxv1f32( %v) { ; CHECK-LABEL: vfabs_nxv1f32: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define @vfabs_nxv1f32( %v) { ret %r } -declare @llvm.fabs.nxv2f32() - define @vfabs_nxv2f32( %v) { ; CHECK-LABEL: vfabs_nxv2f32: ; CHECK: # %bb.0: @@ -366,8 +350,6 @@ define @vfabs_nxv2f32( %v) { ret %r } -declare @llvm.fabs.nxv4f32() - define @vfabs_nxv4f32( %v) { ; CHECK-LABEL: vfabs_nxv4f32: ; CHECK: # %bb.0: @@ -378,8 +360,6 @@ define @vfabs_nxv4f32( %v) { ret %r } -declare @llvm.fabs.nxv8f32() - define @vfabs_nxv8f32( %v) { ; CHECK-LABEL: vfabs_nxv8f32: ; CHECK: # %bb.0: @@ -390,8 +370,6 @@ define @vfabs_nxv8f32( %v) { ret %r } -declare @llvm.fabs.nxv16f32() - define @vfabs_nxv16f32( %v) { ; CHECK-LABEL: vfabs_nxv16f32: ; CHECK: # %bb.0: @@ -402,8 +380,6 @@ define @vfabs_nxv16f32( %v) { ret %r } -declare @llvm.fabs.nxv1f64() - define @vfabs_nxv1f64( %v) { ; CHECK-LABEL: vfabs_nxv1f64: ; CHECK: # %bb.0: @@ -414,8 +390,6 @@ define @vfabs_nxv1f64( %v) { ret %r } -declare @llvm.fabs.nxv2f64() - define @vfabs_nxv2f64( %v) { ; CHECK-LABEL: vfabs_nxv2f64: ; CHECK: # %bb.0: @@ -426,8 +400,6 @@ define @vfabs_nxv2f64( %v) { ret %r } -declare @llvm.fabs.nxv4f64() - define @vfabs_nxv4f64( %v) { ; CHECK-LABEL: vfabs_nxv4f64: ; CHECK: # %bb.0: @@ -438,8 +410,6 @@ define @vfabs_nxv4f64( %v) { ret %r } -declare @llvm.fabs.nxv8f64() - define @vfabs_nxv8f64( %v) { ; CHECK-LABEL: vfabs_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll index c6888c0bcae0f..e0fcd4009ad2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll @@ -324,8 +324,6 @@ define @vfabs_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.fabs.nxv1f16(, , i32) - define @vfabs_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -378,8 +376,6 @@ define @vfabs_vv_nxv1f16_unmasked( %va, i ret %v } 
-declare @llvm.vp.fabs.nxv2f16(, , i32) - define @vfabs_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -432,8 +428,6 @@ define @vfabs_vv_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv4f16(, , i32) - define @vfabs_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -486,8 +480,6 @@ define @vfabs_vv_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv8f16(, , i32) - define @vfabs_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -540,8 +532,6 @@ define @vfabs_vv_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv16f16(, , i32) - define @vfabs_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -594,8 +584,6 @@ define @vfabs_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fabs.nxv32f16(, , i32) - define @vfabs_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -648,8 +636,6 @@ define @vfabs_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fabs.nxv1f32(, , i32) - define @vfabs_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f32: ; CHECK: # %bb.0: @@ -670,8 +656,6 @@ define @vfabs_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv2f32(, , i32) - define @vfabs_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f32: ; CHECK: # %bb.0: @@ -692,8 +676,6 @@ define @vfabs_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv4f32(, , i32) - define @vfabs_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f32: ; CHECK: # %bb.0: @@ -714,8 +696,6 @@ define @vfabs_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv8f32(, , i32) - define @vfabs_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f32: ; CHECK: # %bb.0: @@ -736,8 +716,6 @@ define @vfabs_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv16f32(, , i32) - 
define @vfabs_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f32: ; CHECK: # %bb.0: @@ -758,8 +736,6 @@ define @vfabs_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fabs.nxv1f64(, , i32) - define @vfabs_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f64: ; CHECK: # %bb.0: @@ -780,8 +756,6 @@ define @vfabs_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv2f64(, , i32) - define @vfabs_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f64: ; CHECK: # %bb.0: @@ -802,8 +776,6 @@ define @vfabs_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv4f64(, , i32) - define @vfabs_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f64: ; CHECK: # %bb.0: @@ -824,8 +796,6 @@ define @vfabs_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv7f64(, , i32) - define @vfabs_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv7f64: ; CHECK: # %bb.0: @@ -846,8 +816,6 @@ define @vfabs_vv_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv8f64(, , i32) - define @vfabs_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f64: ; CHECK: # %bb.0: @@ -869,7 +837,6 @@ define @vfabs_vv_nxv8f64_unmasked( %v } ; Test splitting. 
-declare @llvm.vp.fabs.nxv16f64(, , i32) define @vfabs_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll index db1b081258d5f..0130af7d9e507 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 
+96,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16: ; 
CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -555,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -579,13 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll index 03ef641364335..8d10b21fc3e3f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll @@ -267,7 +267,6 @@ define @vfadd_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f16(, , metadata, metadata) define @vfadd_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -315,7 +314,6 @@ define @vfadd_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f16(, , metadata, metadata) define @vfadd_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -363,7 +361,6 @@ define @vfadd_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f16(, , metadata, metadata) define @vfadd_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -411,7 +408,6 @@ define @vfadd_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f16(, , metadata, metadata) define @vfadd_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -459,7 +455,6 @@ define @vfadd_vf_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv16f16(, , metadata, metadata) define @vfadd_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -507,7 +502,6 @@ define @vfadd_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv32f16(, , metadata, metadata) define @vfadd_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -600,7 +594,6 @@ define @vfadd_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f32(, , metadata, metadata) define @vfadd_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv1f32: ; CHECK: # %bb.0: # %entry 
@@ -624,7 +617,6 @@ define @vfadd_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f32(, , metadata, metadata) define @vfadd_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -648,7 +640,6 @@ define @vfadd_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f32(, , metadata, metadata) define @vfadd_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -672,7 +663,6 @@ define @vfadd_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f32(, , metadata, metadata) define @vfadd_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -696,7 +686,6 @@ define @vfadd_vf_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv16f32(, , metadata, metadata) define @vfadd_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -720,7 +709,6 @@ define @vfadd_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f64(, , metadata, metadata) define @vfadd_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -744,7 +732,6 @@ define @vfadd_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f64(, , metadata, metadata) define @vfadd_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -768,7 +755,6 @@ define @vfadd_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f64(, , metadata, metadata) define @vfadd_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -792,7 +778,6 @@ define @vfadd_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f64(, , metadata, metadata) define @vfadd_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv8f64: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll index 1ab2209647c80..d03b068e11ea8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -15,8 +15,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFBFA -declare @llvm.vp.fadd.nxv1bf16(, , , i32) - define @vfadd_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv1bf16: ; ZVFH: # %bb.0: @@ -267,8 +265,6 @@ define @vfadd_vf_nxv1bf16_unmasked_commute( %v } -declare @llvm.vp.fadd.nxv2bf16(, , , i32) - define @vfadd_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv2bf16: ; ZVFH: # %bb.0: @@ -431,8 +427,6 @@ define @vfadd_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv4bf16(, , , i32) - define @vfadd_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv4bf16: ; ZVFH: # %bb.0: @@ -595,8 +589,6 @@ define @vfadd_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv8bf16(, , , i32) - define @vfadd_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv8bf16: ; ZVFH: # %bb.0: @@ -759,8 +751,6 @@ define @vfadd_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv16bf16(, , , i32) - define @vfadd_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv16bf16: ; ZVFH: # %bb.0: @@ -923,8 +913,6 @@ define @vfadd_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fadd.nxv32bf16(, , , i32) - define @vfadd_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv32bf16: ; ZVFH: # %bb.0: @@ -1565,7 +1553,6 @@ define @vfadd_vf_nxv32bf16_unmasked( @llvm.vp.fadd.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fadd.nxv1f16(, , , i32) define @vfadd_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv1f16: @@ -1783,8 +1770,6 @@ define @vfadd_vf_nxv1f16_unmasked_commute( %v } -declare 
@llvm.vp.fadd.nxv2f16(, , , i32) - define @vfadd_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -1925,8 +1910,6 @@ define @vfadd_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv4f16(, , , i32) - define @vfadd_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -2067,8 +2050,6 @@ define @vfadd_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv8f16(, , , i32) - define @vfadd_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -2209,8 +2190,6 @@ define @vfadd_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv16f16(, , , i32) - define @vfadd_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -2351,8 +2330,6 @@ define @vfadd_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fadd.nxv32f16(, , , i32) - define @vfadd_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -2819,8 +2796,6 @@ define @vfadd_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fadd.nxv1f32(, , , i32) - define @vfadd_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -2865,8 +2840,6 @@ define @vfadd_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv2f32(, , , i32) - define @vfadd_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -2911,8 +2884,6 @@ define @vfadd_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv4f32(, , , i32) - define @vfadd_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -2957,8 +2928,6 @@ define @vfadd_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv8f32(, , , i32) - define @vfadd_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -3003,8 +2972,6 @@ define 
@vfadd_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv16f32(, , , i32) - define @vfadd_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -3049,8 +3016,6 @@ define @vfadd_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv1f64(, , , i32) - define @vfadd_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -3095,8 +3060,6 @@ define @vfadd_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv2f64(, , , i32) - define @vfadd_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -3141,8 +3104,6 @@ define @vfadd_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv4f64(, , , i32) - define @vfadd_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -3187,8 +3148,6 @@ define @vfadd_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv7f64(, , , i32) - define @vfadd_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv7f64: ; CHECK: # %bb.0: @@ -3199,8 +3158,6 @@ define @vfadd_vv_nxv7f64( %va, %v } -declare @llvm.vp.fadd.nxv8f64(, , , i32) - define @vfadd_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll index 10f7d922efeed..d9e522be048d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -14,12 +14,6 @@ ; ZVFMIN: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vfadd -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -38,13 +32,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -64,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -88,13 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -114,12 +88,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -138,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -214,12 +162,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -238,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -264,12 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -288,13 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -315,12 +237,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -439,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -465,12 +348,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -489,13 +366,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -515,12 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -539,13 +403,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -566,12 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -590,13 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -616,12 +460,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -640,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -666,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -690,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -716,12 +534,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -740,13 +552,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -767,12 +572,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -791,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -817,12 +609,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -841,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -867,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -891,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -917,12 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -941,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -967,12 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -991,13 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1017,12 +757,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1041,13 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1067,12 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1091,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1117,12 +831,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1141,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1167,12 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1191,13 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1217,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define 
@intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1241,13 +923,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1267,12 +942,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1291,13 +960,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1317,12 +979,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1341,13 +997,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1367,12 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1391,13 +1034,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1417,12 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1441,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1467,12 +1090,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1491,13 +1108,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll index d7d49b379b5a4..2af8a8e668ea7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfclass.nxv1i16.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1bf16( ; CHECK-LABEL: 
intrinsic_vfclass_v_nxv1i16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -52,11 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i16.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i16_nxv2bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -100,11 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i16.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i16_nxv4bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -122,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -148,11 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i16.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i16_nxv8bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -170,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -196,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i16.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i16_nxv16bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16: ; CHECK: # %bb.0: # 
%entry @@ -218,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -244,11 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv32i16.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv32i16_nxv32bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -266,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll index 862a8355d4321..4879449129161 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll @@ -63,7 +63,6 @@ define @isnan_nxv2f32( %x) { ret %1 } - define @isnan_nxv4f32( %x) { ; CHECK-LABEL: isnan_nxv4f32: ; CHECK: # %bb.0: @@ -188,11 +187,3 @@ define @isnotfinite_nxv16f32( %x) { ret %1 } -declare @llvm.is.fpclass.nxv2f16(, i32) -declare @llvm.is.fpclass.nxv2f32(, i32) -declare @llvm.is.fpclass.nxv4f32(, i32) -declare @llvm.is.fpclass.nxv8f32(, i32) -declare @llvm.is.fpclass.nxv16f32(, i32) -declare @llvm.is.fpclass.nxv2f64(, i32) -declare @llvm.is.fpclass.nxv4f64(, i32) -declare @llvm.is.fpclass.nxv8f64(, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll index 36e1bea1f9994..8c1c973c9bb0c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll @@ -218,12 +218,3 @@ define @isneginf_nxv8f64_unmasked( %x, i3 ret %1 } - -declare @llvm.vp.is.fpclass.nxv2f16(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv2f32(, i32, , i32) -declare 
@llvm.vp.is.fpclass.nxv4f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv8f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv16f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv2f64(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv4f64(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv8f64(, i32, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll index 7017946276be8..e0ef834fb5000 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfclass.nxv1i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -52,11 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -100,11 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -122,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: ; 
CHECK: # %bb.0: # %entry @@ -148,11 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -170,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -196,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -218,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -244,11 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv32i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -266,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -292,11 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -314,12 +243,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -340,11 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i32( - , - , - iXLen); - 
define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -362,12 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -388,11 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -410,12 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -436,11 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -458,12 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -484,11 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -506,12 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -532,11 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ 
-554,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,11 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -628,11 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -650,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -676,11 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -698,12 +539,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll index ec6ab422d6405..1981c78398b06 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v 
-target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fcmp.nxv1f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f16: ; CHECK: # %bb.0: @@ -637,7 +636,6 @@ define @fcmp_uno_fv_nxv1f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv2f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1270,7 +1268,6 @@ define @fcmp_uno_fv_nxv2f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1903,7 +1900,6 @@ define @fcmp_uno_fv_nxv4f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2578,7 +2574,6 @@ define @fcmp_uno_fv_nxv8f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv16f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv16f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv16f16: ; CHECK: # %bb.0: @@ -3253,7 +3248,6 @@ define @fcmp_uno_fv_nxv16f16( %va, half % ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv32f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv32f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3928,7 +3922,6 @@ define @fcmp_uno_fv_nxv32f16( %va, half % ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv1f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f32: ; CHECK: # %bb.0: @@ -4561,7 +4554,6 @@ define @fcmp_uno_fv_nxv1f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f32(, , metadata, metadata) 
define @fcmp_oeq_vv_nxv2f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f32: ; CHECK: # %bb.0: @@ -5194,7 +5186,6 @@ define @fcmp_uno_fv_nxv2f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f32: ; CHECK: # %bb.0: @@ -5869,7 +5860,6 @@ define @fcmp_uno_fv_nxv4f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f32: ; CHECK: # %bb.0: @@ -6544,7 +6534,6 @@ define @fcmp_uno_fv_nxv8f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv16f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv16f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv16f32: ; CHECK: # %bb.0: @@ -7219,7 +7208,6 @@ define @fcmp_uno_fv_nxv16f32( %va, float ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv1f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -7852,7 +7840,6 @@ define @fcmp_uno_fv_nxv1f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv2f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f64: ; CHECK: # %bb.0: @@ -8527,7 +8514,6 @@ define @fcmp_uno_fv_nxv2f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f64: ; CHECK: # %bb.0: @@ -9202,7 +9188,6 @@ define @fcmp_uno_fv_nxv4f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll index 2ca9dd24e915a..d33cd5a558630 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fcmps.nxv1f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f16: ; CHECK: # %bb.0: @@ -536,7 +535,6 @@ define @fcmps_uno_fv_nxv1f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1068,7 +1066,6 @@ define @fcmps_uno_fv_nxv2f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv4f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1600,7 +1597,6 @@ define @fcmps_uno_fv_nxv4f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2132,7 +2128,6 @@ define @fcmps_uno_fv_nxv8f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv16f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv16f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv16f16: ; CHECK: # %bb.0: @@ -2664,7 +2659,6 @@ define @fcmps_uno_fv_nxv16f16( %va, half ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv32f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv32f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3196,7 +3190,6 @@ define 
@fcmps_uno_fv_nxv32f16( %va, half ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv1f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f32: ; CHECK: # %bb.0: @@ -3728,7 +3721,6 @@ define @fcmps_uno_fv_nxv1f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f32: ; CHECK: # %bb.0: @@ -4260,7 +4252,6 @@ define @fcmps_uno_fv_nxv2f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv4f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f32: ; CHECK: # %bb.0: @@ -4792,7 +4783,6 @@ define @fcmps_uno_fv_nxv4f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f32: ; CHECK: # %bb.0: @@ -5324,7 +5314,6 @@ define @fcmps_uno_fv_nxv8f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv16f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv16f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv16f32: ; CHECK: # %bb.0: @@ -5856,7 +5845,6 @@ define @fcmps_uno_fv_nxv16f32( %va, floa ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv1f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -6388,7 +6376,6 @@ define @fcmps_uno_fv_nxv1f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f64: ; CHECK: # %bb.0: @@ -6920,7 +6907,6 @@ define @fcmps_uno_fv_nxv2f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f64(, , metadata, metadata) define 
@fcmps_oeq_vv_nxv4f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f64: ; CHECK: # %bb.0: @@ -7452,7 +7438,6 @@ define @fcmps_uno_fv_nxv4f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll index bef2e8d3b57fc..3a3fdbe59e6c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -198,8 +198,6 @@ define @nxv32bf32( %vm, %r } -declare @llvm.copysign.nxv1f16(, ) - define @vfcopysign_vv_nxv1f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -678,8 +676,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64( %r } -declare @llvm.copysign.nxv2f16(, ) - define @vfcopysign_vv_nxv2f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -818,8 +814,6 @@ define @vfcopynsign_vf_nxv2f16( %vm, half ret %r } -declare @llvm.copysign.nxv4f16(, ) - define @vfcopysign_vv_nxv4f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -958,8 +952,6 @@ define @vfcopynsign_vf_nxv4f16( %vm, half ret %r } -declare @llvm.copysign.nxv8f16(, ) - define @vfcopysign_vv_nxv8f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -1438,8 +1430,6 @@ define @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64( %r } -declare @llvm.copysign.nxv16f16(, ) - define @vfcopysign_vv_nxv16f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1578,8 +1568,6 @@ define @vfcopynsign_vf_nxv16f16( %vm, h ret %r } -declare @llvm.copysign.nxv32f16(, ) - define @vfcopysign_vv_nxv32f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1718,8 +1706,6 @@ define @vfcopynsign_vf_nxv32f16( %vm, h ret %r } -declare @llvm.copysign.nxv1f32(, ) - define 
@vfcopysign_vv_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1880,8 +1866,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64( %r } -declare @llvm.copysign.nxv2f32(, ) - define @vfcopysign_vv_nxv2f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1928,8 +1912,6 @@ define @vfcopynsign_vf_nxv2f32( %vm, fl ret %r } -declare @llvm.copysign.nxv4f32(, ) - define @vfcopysign_vv_nxv4f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1976,8 +1958,6 @@ define @vfcopynsign_vf_nxv4f32( %vm, fl ret %r } -declare @llvm.copysign.nxv8f32(, ) - define @vfcopysign_vv_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f32: ; CHECK: # %bb.0: @@ -2138,8 +2118,6 @@ define @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64( %r } -declare @llvm.copysign.nxv16f32(, ) - define @vfcopysign_vv_nxv16f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv16f32: ; CHECK: # %bb.0: @@ -2186,8 +2164,6 @@ define @vfcopynsign_vf_nxv16f32( %vm, ret %r } -declare @llvm.copysign.nxv1f64(, ) - define @vfcopysign_vv_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f64: ; CHECK: # %bb.0: @@ -2350,8 +2326,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32( %r } -declare @llvm.copysign.nxv2f64(, ) - define @vfcopysign_vv_nxv2f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f64: ; CHECK: # %bb.0: @@ -2398,8 +2372,6 @@ define @vfcopynsign_vf_nxv2f64( %vm, ret %r } -declare @llvm.copysign.nxv4f64(, ) - define @vfcopysign_vv_nxv4f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f64: ; CHECK: # %bb.0: @@ -2446,8 +2418,6 @@ define @vfcopynsign_vf_nxv4f64( %vm, ret %r } -declare @llvm.copysign.nxv8f64(, ) - define @vfcopysign_vv_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll index 2d90371856b73..5af4db662d867 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll 
@@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll index eb5c5ea58f1b3..1e826cc4b1b42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - , - 
, - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll index b7f9d1a2b2c41..d70c31068aec4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind 
{ ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll index c2db9d64b1842..0fe2d0be48046 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry 
@@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll index f49eaf69f164f..c3131926b3d79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - , - , - iXLen, iXLen); - 
define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll index 662ff865b9a7f..8b7c0fb8d6889 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry 
@@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # 
%entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll index bb121416ddec3..6f23723712d90 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll @@ -286,7 +286,6 @@ define @vfdiv_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv1f16(, , metadata, metadata) define @vfdiv_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -334,7 +333,6 @@ define @vfdiv_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f16(, , metadata, metadata) define @vfdiv_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -382,7 +380,6 @@ define @vfdiv_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f16(, , metadata, metadata) define @vfdiv_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -430,7 +427,6 @@ define @vfdiv_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f16(, , metadata, metadata) define @vfdiv_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -503,7 +499,6 @@ define @vfdiv_fv_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv16f16(, , metadata, metadata) define @vfdiv_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -551,7 +546,6 @@ define @vfdiv_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv32f16(, , metadata, metadata) define @vfdiv_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -644,7 +638,6 @@ define @vfdiv_vf_nxv32f16( %va, half %b ret %vc } -declare 
@llvm.experimental.constrained.fdiv.nxv1f32(, , metadata, metadata) define @vfdiv_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -668,7 +661,6 @@ define @vfdiv_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f32(, , metadata, metadata) define @vfdiv_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -692,7 +684,6 @@ define @vfdiv_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f32(, , metadata, metadata) define @vfdiv_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -716,7 +707,6 @@ define @vfdiv_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f32(, , metadata, metadata) define @vfdiv_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -752,7 +742,6 @@ define @vfdiv_fv_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv16f32(, , metadata, metadata) define @vfdiv_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,7 +765,6 @@ define @vfdiv_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv1f64(, , metadata, metadata) define @vfdiv_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -800,7 +788,6 @@ define @vfdiv_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f64(, , metadata, metadata) define @vfdiv_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,7 +811,6 @@ define @vfdiv_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f64(, , metadata, metadata) define @vfdiv_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -848,7 +834,6 @@ define @vfdiv_vf_nxv4f64( 
%va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f64(, , metadata, metadata) define @vfdiv_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll index 736d575a1a4e3..e9d7137919ac9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fdiv.nxv1bf16(, , , i32) - define @vfdiv_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -82,8 +80,6 @@ define @vfdiv_vf_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv2bf16(, , , i32) - define @vfdiv_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -152,8 +148,6 @@ define @vfdiv_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv4bf16(, , , i32) - define @vfdiv_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -222,8 +216,6 @@ define @vfdiv_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv8bf16(, , , i32) - define @vfdiv_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -292,8 +284,6 @@ define @vfdiv_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv16bf16(, , , i32) - define @vfdiv_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vfdiv_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fdiv.nxv32bf16(, , , i32) - define @vfdiv_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -594,7 +582,6 @@ define @vfdiv_vf_nxv32bf16_unmasked( @llvm.vp.fdiv.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fdiv.nxv1f16(, , 
, i32) define @vfdiv_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv1f16: @@ -688,8 +675,6 @@ define @vfdiv_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv2f16(, , , i32) - define @vfdiv_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -782,8 +767,6 @@ define @vfdiv_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv4f16(, , , i32) - define @vfdiv_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -876,8 +859,6 @@ define @vfdiv_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv8f16(, , , i32) - define @vfdiv_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -970,8 +951,6 @@ define @vfdiv_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv16f16(, , , i32) - define @vfdiv_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1064,8 +1043,6 @@ define @vfdiv_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv32f16(, , , i32) - define @vfdiv_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1321,8 +1298,6 @@ define @vfdiv_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv1f32(, , , i32) - define @vfdiv_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1367,8 +1342,6 @@ define @vfdiv_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv2f32(, , , i32) - define @vfdiv_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1413,8 +1386,6 @@ define @vfdiv_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv4f32(, , , i32) - define @vfdiv_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1459,8 +1430,6 @@ define @vfdiv_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv8f32(, , 
, i32) - define @vfdiv_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1505,8 +1474,6 @@ define @vfdiv_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv16f32(, , , i32) - define @vfdiv_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1551,8 +1518,6 @@ define @vfdiv_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv1f64(, , , i32) - define @vfdiv_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1597,8 +1562,6 @@ define @vfdiv_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv2f64(, , , i32) - define @vfdiv_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1643,8 +1606,6 @@ define @vfdiv_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv4f64(, , , i32) - define @vfdiv_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1689,8 +1650,6 @@ define @vfdiv_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv7f64(, , , i32) - define @vfdiv_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1701,8 +1660,6 @@ define @vfdiv_vv_nxv7f64( %va, %v } -declare @llvm.vp.fdiv.nxv8f64(, , , i32) - define @vfdiv_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 62fb9df7a623e..71d119d9aff6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -355,12 
+264,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -757,12 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -831,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,12 +636,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfdiv.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -907,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -931,13 +691,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -957,12 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -981,13 +728,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1007,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -1031,13 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1057,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1081,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1107,12 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1181,13 +876,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1207,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +913,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1257,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1281,13 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1307,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1357,12 +1006,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f64.f64( - , - , - double, - iXLen, 
iXLen); - define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1381,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1407,12 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1431,13 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1457,12 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1481,13 +1098,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll index c510121ee3ebe..5ad4c2d41b40f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare iXLen @llvm.riscv.vfirst.iXLen.nxv1i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -35,11 +31,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -71,10 +62,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv2i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -89,11 +76,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -111,10 +93,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv4i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -129,11 +107,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -151,10 +124,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv8i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -169,11 +138,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1: ; CHECK: # 
%bb.0: # %entry @@ -191,10 +155,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv16i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -209,11 +169,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -231,10 +186,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv32i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -249,11 +200,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -271,10 +217,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv64i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -289,11 +231,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll index 7e580d1057525..91b4080a4ed84 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll @@ -4,10 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare 
@llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fmul.nxv1f64(, , , i32) - ; (-N0 * -N1) + N2 --> (N0 * N1) + N2 define @test1( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll index 728fa07a7d4e5..c25a0d47c5c53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1bf16(, , , , i32) - define @vfma_vv_nxv1bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -126,8 +124,6 @@ define @vfma_vf_nxv1bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2bf16(, , , , i32) - define @vfma_vv_nxv2bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -240,8 +236,6 @@ define @vfma_vf_nxv2bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4bf16(, , , , i32) - define @vfma_vv_nxv4bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -354,8 +348,6 @@ define @vfma_vf_nxv4bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv8bf16(, , , , i32) - define @vfma_vv_nxv8bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -468,8 +460,6 @@ define @vfma_vf_nxv8bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16bf16(, , , , i32) - define @vfma_vv_nxv16bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -586,8 +576,6 @@ define @vfma_vf_nxv16bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv32bf16(, , , , i32) - define @vfma_vv_nxv32bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -1280,8 +1268,6 @@ define @vfma_vf_nxv32bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f16(, , , , i32) 
- define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -1431,8 +1417,6 @@ define @vfma_vf_nxv1f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv2f16(, , , , i32) - define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -1582,8 +1566,6 @@ define @vfma_vf_nxv2f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv4f16(, , , , i32) - define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -1733,8 +1715,6 @@ define @vfma_vf_nxv4f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv8f16(, , , , i32) - define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -1884,8 +1864,6 @@ define @vfma_vf_nxv8f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv16f16(, , , , i32) - define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -2039,8 +2017,6 @@ define @vfma_vf_nxv16f16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv32f16(, , , , i32) - define @vfma_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -2772,8 +2748,6 @@ define @vfma_vf_nxv32f16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f32(, , , , i32) - define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32: ; CHECK: # %bb.0: @@ -2843,8 +2817,6 @@ define @vfma_vf_nxv1f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) - define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32: ; CHECK: # %bb.0: @@ -2914,8 +2886,6 @@ define @vfma_vf_nxv2f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) - define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32: ; CHECK: # %bb.0: @@ -2985,8 +2955,6 @@ define @vfma_vf_nxv4f32_unmasked_commute( 
%v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) - define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32: ; CHECK: # %bb.0: @@ -3056,8 +3024,6 @@ define @vfma_vf_nxv8f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) - define @vfma_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f32: ; CHECK: # %bb.0: @@ -3129,8 +3095,6 @@ define @vfma_vf_nxv16f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) - define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64: ; CHECK: # %bb.0: @@ -3200,8 +3164,6 @@ define @vfma_vf_nxv1f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) - define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64: ; CHECK: # %bb.0: @@ -3271,8 +3233,6 @@ define @vfma_vf_nxv2f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) - define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64: ; CHECK: # %bb.0: @@ -3342,8 +3302,6 @@ define @vfma_vf_nxv4f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv7f64(, , , , i32) - define @vfma_vv_nxv7f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv7f64: ; CHECK: # %bb.0: @@ -3367,8 +3325,6 @@ define @vfma_vv_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) - define @vfma_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f64: ; CHECK: # %bb.0: @@ -3440,8 +3396,6 @@ define @vfma_vf_nxv8f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16f64(, , , , i32) - define @vfma_vv_nxv16f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f64: ; CHECK: # %bb.0: @@ -3657,8 +3611,6 @@ define @vfma_vv_nxv16f64_unmasked( ret %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -4524,8 +4476,6 @@ 
define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -5391,8 +5341,6 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -6258,8 +6206,6 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -7125,8 +7071,6 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - define @vfmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -8005,8 +7949,6 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfmsub_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -11822,8 +11764,6 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -12213,8 +12153,6 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -12604,8 +12542,6 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -12995,8 +12931,6 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %v } -declare 
@llvm.vp.fneg.nxv8f32(, , i32) - define @vfmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -13386,8 +13320,6 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -13787,8 +13719,6 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -14178,8 +14108,6 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -14569,8 +14497,6 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -14960,8 +14886,6 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f64(, , i32) - define @vfmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll index 13821d745846f..6c66cfcaef07b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; 
CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( - , - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # 
%entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll index ef583b748b9c2..28a8ef0087d85 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfmacc_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f16: ; CHECK: # %bb.0: @@ -115,11 +110,6 @@ define @vfmacc_vf_nxv1f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfmacc_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f16: ; CHECK: # %bb.0: @@ -226,11 +216,6 @@ define @vfmacc_vf_nxv2f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfmacc_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f16: ; CHECK: # %bb.0: @@ -337,11 +322,6 @@ define @vfmacc_vf_nxv4f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfmacc_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f16: ; CHECK: # %bb.0: @@ -448,11 +428,6 @@ define @vfmacc_vf_nxv8f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define 
@vfmacc_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv16f16: ; CHECK: # %bb.0: @@ -559,11 +534,6 @@ define @vfmacc_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfmacc_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv32f16: ; CHECK: # %bb.0: @@ -673,11 +643,6 @@ define @vfmacc_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f32: ; CHECK: # %bb.0: @@ -784,11 +749,6 @@ define @vfmacc_vf_nxv1f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f32: ; CHECK: # %bb.0: @@ -895,11 +855,6 @@ define @vfmacc_vf_nxv2f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1006,11 +961,6 @@ define @vfmacc_vf_nxv4f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1117,11 +1067,6 @@ define @vfmacc_vf_nxv8f32_commute_ta( % ret %u } -declare 
@llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1231,11 +1176,6 @@ define @vfmacc_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1342,11 +1282,6 @@ define @vfmacc_vf_nxv1f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1453,11 +1388,6 @@ define @vfmacc_vf_nxv2f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1564,11 +1494,6 @@ define @vfmacc_vf_nxv4f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll index 1f0db104df7aa..ea11cb177b49d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: 
# %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # 
%entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmacc.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll index 09fc199c29d23..6d8324a1df188 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # 
%entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll index 11aebe9ae2a96..99c9db08aac6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll @@ -53,7 +53,6 @@ define @vfmadd_vf_nxv1bf16( %va, %vd } - define @vfmadd_vv_nxv2bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -90,7 +89,6 @@ define @vfmadd_vf_nxv2bf16( %va, %vd } - define @vfmadd_vv_nxv4bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4bf16: ; CHECK: # %bb.0: 
@@ -127,7 +125,6 @@ define @vfmadd_vf_nxv4bf16( %va, %vd } - define @vfmadd_vv_nxv8bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -164,7 +161,6 @@ define @vfmadd_vf_nxv8bf16( %va, %vd } - define @vfmadd_vv_nxv16bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -217,7 +213,6 @@ define @vfmadd_vf_nxv16bf16( %va, < ret %vd } - define @vfmadd_vv_nxv32bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -395,8 +390,6 @@ define @vfmadd_vf_nxv32bf16( %va, < ret %vd } -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfmadd_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -445,8 +438,6 @@ define @vfmadd_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfmadd_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -495,8 +486,6 @@ define @vfmadd_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfmadd_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -545,8 +534,6 @@ define @vfmadd_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfmadd_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -595,8 +582,6 @@ define @vfmadd_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfmadd_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -661,8 +646,6 @@ define @vfmadd_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfmadd_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -853,8 +836,6 @@ 
define @vfmadd_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfmadd_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -877,8 +858,6 @@ define @vfmadd_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfmadd_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -901,8 +880,6 @@ define @vfmadd_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfmadd_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -925,8 +902,6 @@ define @vfmadd_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfmadd_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -949,8 +924,6 @@ define @vfmadd_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfmadd_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -974,8 +947,6 @@ define @vfmadd_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfmadd_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -998,8 +969,6 @@ define @vfmadd_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfmadd_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1022,8 +991,6 @@ define @vfmadd_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfmadd_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1046,8 +1013,6 @@ define @vfmadd_vf_nxv4f64( %va, %vd } -declare 
@llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfmadd_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll index c0d3b55f5d35e..b8f138f4bd52e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll @@ -61,8 +61,6 @@ define @vfmadd_vf_nxv1bf16( %va, %vd } -declare @llvm.fma.v2bf16(, , ) - define @vfmadd_vv_nxv2bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -97,8 +95,6 @@ define @vfmadd_vf_nxv2bf16( %va, %vd } -declare @llvm.fma.v4bf16(, , ) - define @vfmadd_vv_nxv4bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -133,8 +129,6 @@ define @vfmadd_vf_nxv4bf16( %va, %vd } -declare @llvm.fma.v8bf16(, , ) - define @vfmadd_vv_nxv8bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -169,8 +163,6 @@ define @vfmadd_vf_nxv8bf16( %va, %vd } -declare @llvm.fma.v16bf16(, , ) - define @vfmadd_vv_nxv16bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -205,8 +197,6 @@ define @vfmadd_vf_nxv16bf16( %va, < ret %vd } -declare @llvm.fma.v32bf16(, , ) - define @vfmadd_vv_nxv32bf16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv32bf16: ; ZVFH: # %bb.0: @@ -571,8 +561,6 @@ define @vfmadd_vf_nxv32bf16( %va, < ret %vd } -declare @llvm.fma.v1f16(, , ) - define @vfmadd_vv_nxv1f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -641,8 +629,6 @@ define @vfmadd_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfmadd_vv_nxv2f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -689,8 +675,6 @@ define @vfmadd_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfmadd_vv_nxv4f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -737,8 +721,6 @@ define @vfmadd_vf_nxv4f16( 
%va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfmadd_vv_nxv8f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -785,8 +767,6 @@ define @vfmadd_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfmadd_vv_nxv16f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -833,8 +813,6 @@ define @vfmadd_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfmadd_vv_nxv32f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1028,8 +1006,6 @@ define @vfmadd_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfmadd_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1052,8 +1028,6 @@ define @vfmadd_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfmadd_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1076,8 +1050,6 @@ define @vfmadd_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfmadd_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1100,8 +1072,6 @@ define @vfmadd_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfmadd_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1124,8 +1094,6 @@ define @vfmadd_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfmadd_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1149,8 +1117,6 @@ define @vfmadd_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfmadd_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1173,8 +1139,6 @@ define @vfmadd_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfmadd_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1197,8 +1161,6 @@ define @vfmadd_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfmadd_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: 
vfmadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1221,8 +1183,6 @@ define @vfmadd_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfmadd_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll index fb04888a84dea..0609dce18903f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmadd.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( - , - float, - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll index a337d3061ce78..dff9309194486 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2bf16.bf16( - , - , - bfloat, - 
iXLen); - define @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 
@@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll index c06836f129005..63bfe1dfad5fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll @@ -44,8 +44,6 @@ define @vfmax_nxv1bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv2bf16(, ) - define @vfmax_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -78,8 +76,6 @@ define @vfmax_nxv2bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv4bf16(, ) - define @vfmax_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vfmax_nxv4bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv8bf16(, ) - define @vfmax_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -146,8 +140,6 @@ define @vfmax_nxv8bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv16bf16(, ) - define @vfmax_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define @vfmax_nxv16bf16_vf( %a, bfl ret %v } 
-declare @llvm.maxnum.nxv32bf16(, ) - define @vfmax_nxv32bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv32bf16_vv: ; CHECK: # %bb.0: @@ -261,8 +251,6 @@ define @vfmax_nxv32bf16_vf( %a, bfl ret %v } -declare @llvm.maxnum.nxv1f16(, ) - define @vfmax_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -307,8 +295,6 @@ define @vfmax_nxv1f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv2f16(, ) - define @vfmax_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -353,8 +339,6 @@ define @vfmax_nxv2f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv4f16(, ) - define @vfmax_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -399,8 +383,6 @@ define @vfmax_nxv4f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv8f16(, ) - define @vfmax_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -445,8 +427,6 @@ define @vfmax_nxv8f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv16f16(, ) - define @vfmax_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -491,8 +471,6 @@ define @vfmax_nxv16f16_vf( %a, half %b) ret %v } -declare @llvm.maxnum.nxv32f16(, ) - define @vfmax_nxv32f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -584,8 +562,6 @@ define @vfmax_nxv32f16_vf( %a, half %b) ret %v } -declare @llvm.maxnum.nxv1f32(, ) - define @vfmax_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f32_vv: ; CHECK: # %bb.0: @@ -608,8 +584,6 @@ define @vfmax_nxv1f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv2f32(, ) - define @vfmax_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f32_vv: ; CHECK: # %bb.0: @@ -632,8 +606,6 @@ define @vfmax_nxv2f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv4f32(, ) - define @vfmax_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f32_vv: ; CHECK: # %bb.0: @@ -656,8 +628,6 @@ define @vfmax_nxv4f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv8f32(, ) - define @vfmax_nxv8f32_vv( %a, %b) { ; 
CHECK-LABEL: vfmax_nxv8f32_vv: ; CHECK: # %bb.0: @@ -680,8 +650,6 @@ define @vfmax_nxv8f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv16f32(, ) - define @vfmax_nxv16f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16f32_vv: ; CHECK: # %bb.0: @@ -704,8 +672,6 @@ define @vfmax_nxv16f32_vf( %a, float ret %v } -declare @llvm.maxnum.nxv1f64(, ) - define @vfmax_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f64_vv: ; CHECK: # %bb.0: @@ -728,8 +694,6 @@ define @vfmax_nxv1f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv2f64(, ) - define @vfmax_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f64_vv: ; CHECK: # %bb.0: @@ -752,8 +716,6 @@ define @vfmax_nxv2f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv4f64(, ) - define @vfmax_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f64_vv: ; CHECK: # %bb.0: @@ -776,8 +738,6 @@ define @vfmax_nxv4f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv8f64(, ) - define @vfmax_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll index 2ed6bf08b5672..394887fee67fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.maxnum.nxv1bf16(, , , i32) - define @vfmax_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define @vfmax_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv2bf16(, , , i32) - define @vfmax_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -76,8 +72,6 @@ define @vfmax_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv4bf16(, , , i32) - define @vfmax_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define 
@vfmax_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv8bf16(, , , i32) - define @vfmax_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @vfmax_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv16bf16(, , , i32) - define @vfmax_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vfmax_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.maxnum.nxv32bf16(, , , i32) - define @vfmax_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -279,7 +267,6 @@ define @vfmax_vv_nxv32bf16_unmasked( @llvm.vp.maxnum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.maxnum.nxv1f16(, , , i32) define @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv1f16: @@ -323,8 +310,6 @@ define @vfmax_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv2f16(, , , i32) - define @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -367,8 +352,6 @@ define @vfmax_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv4f16(, , , i32) - define @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -411,8 +394,6 @@ define @vfmax_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv8f16(, , , i32) - define @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -455,8 +436,6 @@ define @vfmax_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv16f16(, , , i32) - define @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -499,8 +478,6 @@ define @vfmax_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.maxnum.nxv32f16(, , , i32) - define @vfmax_vv_nxv32f16( %va, %vb, %m, i32 zeroext 
%evl) { ; ZVFH-LABEL: vfmax_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -619,8 +596,6 @@ define @vfmax_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.maxnum.nxv1f32(, , , i32) - define @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32: ; CHECK: # %bb.0: @@ -641,8 +616,6 @@ define @vfmax_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv2f32(, , , i32) - define @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32: ; CHECK: # %bb.0: @@ -663,8 +636,6 @@ define @vfmax_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv4f32(, , , i32) - define @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32: ; CHECK: # %bb.0: @@ -685,8 +656,6 @@ define @vfmax_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv8f32(, , , i32) - define @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32: ; CHECK: # %bb.0: @@ -707,8 +676,6 @@ define @vfmax_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv16f32(, , , i32) - define @vfmax_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f32: ; CHECK: # %bb.0: @@ -729,8 +696,6 @@ define @vfmax_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv1f64(, , , i32) - define @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64: ; CHECK: # %bb.0: @@ -751,8 +716,6 @@ define @vfmax_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv2f64(, , , i32) - define @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64: ; CHECK: # %bb.0: @@ -773,8 +736,6 @@ define @vfmax_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv4f64(, , , i32) - define @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64: ; CHECK: # %bb.0: @@ -795,8 +756,6 @@ define @vfmax_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv8f64(, , , i32) - define 
@vfmax_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll index 23eb52afcc905..166faef24271b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmax.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f32.f32( - , - , - float, - iXLen); - define 
@intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f64.f64( - , - 
, - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll index 86ba7c7fb7fe6..8a50a014c64cf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmerge.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen); - define 
@intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll index cd9166ddbb7a9..7eec4b24e6bf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f16.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f16.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmerge.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f16.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f16.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32f16.nxv32f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv32f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f32.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f32.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f32.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 
@@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f32.nxv16f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f64.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # 
%entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f64.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f64.nxv8f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll index 37c0cf506a6fa..d40e39c3138f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - 
define @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ 
-356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll index 98ccbf03e1841..bb435c9d0114f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll @@ -44,8 +44,6 @@ define @vfmin_nxv1bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv2bf16(, ) - define @vfmin_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -78,8 +76,6 @@ define @vfmin_nxv2bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv4bf16(, ) - define @vfmin_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vfmin_nxv4bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv8bf16(, ) - define @vfmin_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -146,8 +140,6 @@ define @vfmin_nxv8bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv16bf16(, ) - define @vfmin_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define @vfmin_nxv16bf16_vf( %a, bfl ret %v } -declare @llvm.minnum.nxv32bf16(, ) - define @vfmin_nxv32bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv32bf16_vv: ; CHECK: # %bb.0: @@ -261,8 +251,6 @@ define @vfmin_nxv32bf16_vf( %a, bfl ret 
%v } -declare @llvm.minnum.nxv1f16(, ) - define @vfmin_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -307,8 +295,6 @@ define @vfmin_nxv1f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv2f16(, ) - define @vfmin_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -353,8 +339,6 @@ define @vfmin_nxv2f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv4f16(, ) - define @vfmin_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -399,8 +383,6 @@ define @vfmin_nxv4f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv8f16(, ) - define @vfmin_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -445,8 +427,6 @@ define @vfmin_nxv8f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv16f16(, ) - define @vfmin_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -491,8 +471,6 @@ define @vfmin_nxv16f16_vf( %a, half %b) ret %v } -declare @llvm.minnum.nxv32f16(, ) - define @vfmin_nxv32f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -584,8 +562,6 @@ define @vfmin_nxv32f16_vf( %a, half %b) ret %v } -declare @llvm.minnum.nxv1f32(, ) - define @vfmin_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f32_vv: ; CHECK: # %bb.0: @@ -608,8 +584,6 @@ define @vfmin_nxv1f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv2f32(, ) - define @vfmin_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f32_vv: ; CHECK: # %bb.0: @@ -632,8 +606,6 @@ define @vfmin_nxv2f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv4f32(, ) - define @vfmin_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f32_vv: ; CHECK: # %bb.0: @@ -656,8 +628,6 @@ define @vfmin_nxv4f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv8f32(, ) - define @vfmin_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f32_vv: ; CHECK: # %bb.0: @@ -680,8 +650,6 @@ define @vfmin_nxv8f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv16f32(, ) - define @vfmin_nxv16f32_vv( %a, %b) { ; 
CHECK-LABEL: vfmin_nxv16f32_vv: ; CHECK: # %bb.0: @@ -704,8 +672,6 @@ define @vfmin_nxv16f32_vf( %a, float ret %v } -declare @llvm.minnum.nxv1f64(, ) - define @vfmin_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f64_vv: ; CHECK: # %bb.0: @@ -728,8 +694,6 @@ define @vfmin_nxv1f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv2f64(, ) - define @vfmin_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f64_vv: ; CHECK: # %bb.0: @@ -752,8 +716,6 @@ define @vfmin_nxv2f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv4f64(, ) - define @vfmin_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f64_vv: ; CHECK: # %bb.0: @@ -776,8 +738,6 @@ define @vfmin_nxv4f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv8f64(, ) - define @vfmin_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll index 42e8de1b56c55..5c5542619b6ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.minnum.nxv1bf16(, , , i32) - define @vfmin_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define @vfmin_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv2bf16(, , , i32) - define @vfmin_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -76,8 +72,6 @@ define @vfmin_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv4bf16(, , , i32) - define @vfmin_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define @vfmin_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv8bf16(, , , i32) - define @vfmin_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8bf16: ; CHECK: # %bb.0: @@ 
-140,8 +132,6 @@ define @vfmin_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv16bf16(, , , i32) - define @vfmin_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vfmin_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.minnum.nxv32bf16(, , , i32) - define @vfmin_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -279,7 +267,6 @@ define @vfmin_vv_nxv32bf16_unmasked( @llvm.vp.minnum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.minnum.nxv1f16(, , , i32) define @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv1f16: @@ -323,8 +310,6 @@ define @vfmin_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv2f16(, , , i32) - define @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -367,8 +352,6 @@ define @vfmin_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv4f16(, , , i32) - define @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -411,8 +394,6 @@ define @vfmin_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv8f16(, , , i32) - define @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -455,8 +436,6 @@ define @vfmin_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv16f16(, , , i32) - define @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -499,8 +478,6 @@ define @vfmin_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.minnum.nxv32f16(, , , i32) - define @vfmin_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -619,8 +596,6 @@ define @vfmin_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.minnum.nxv1f32(, , , i32) - define @vfmin_vv_nxv1f32( %va, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32: ; CHECK: # %bb.0: @@ -641,8 +616,6 @@ define @vfmin_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv2f32(, , , i32) - define @vfmin_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32: ; CHECK: # %bb.0: @@ -663,8 +636,6 @@ define @vfmin_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv4f32(, , , i32) - define @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32: ; CHECK: # %bb.0: @@ -685,8 +656,6 @@ define @vfmin_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv8f32(, , , i32) - define @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32: ; CHECK: # %bb.0: @@ -707,8 +676,6 @@ define @vfmin_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv16f32(, , , i32) - define @vfmin_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f32: ; CHECK: # %bb.0: @@ -729,8 +696,6 @@ define @vfmin_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv1f64(, , , i32) - define @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64: ; CHECK: # %bb.0: @@ -751,8 +716,6 @@ define @vfmin_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv2f64(, , , i32) - define @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64: ; CHECK: # %bb.0: @@ -773,8 +736,6 @@ define @vfmin_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv4f64(, , , i32) - define @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64: ; CHECK: # %bb.0: @@ -795,8 +756,6 @@ define @vfmin_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv8f64(, , , i32) - define @vfmin_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll index 
32048ca928d45..d060a24e665c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define 
@intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32f16.f16( - , - , - 
half, - iXLen); - define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmin.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # 
%bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll index 948d2196f2bb4..25fc46fd23699 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll index 31369b69bee15..72ed38b53d2ff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vmfsac_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define @vmfsac_vf_nxv1f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vmfsac_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define @vmfsac_vf_nxv2f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vmfsac_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define @vmfsac_vf_nxv4f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) 
-declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vmfsac_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define @vmfsac_vf_nxv8f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vmfsac_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv16f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define @vmfsac_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vmfsac_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv32f16: ; CHECK: # %bb.0: @@ -721,11 +691,6 @@ define @vmfsac_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vmfsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f32: ; CHECK: # %bb.0: @@ -840,11 +805,6 @@ define @vmfsac_vf_nxv1f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vmfsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f32: ; CHECK: # %bb.0: @@ -959,11 +919,6 @@ define @vmfsac_vf_nxv2f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vmfsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmfsac_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1078,11 +1033,6 @@ define @vmfsac_vf_nxv4f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vmfsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1197,11 +1147,6 @@ define @vmfsac_vf_nxv8f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vmfsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1319,11 +1264,6 @@ define @vmfsac_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vmfsac_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1438,11 +1378,6 @@ define @vmfsac_vf_nxv1f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vmfsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1557,11 +1492,6 @@ define @vmfsac_vf_nxv2f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vmfsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1676,11 +1606,6 @@ define @vmfsac_vf_nxv4f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) 
-declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vmfsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll index 319c945435402..8cac1b32cd08f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsac.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( - , - float, - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll index 6838f37339e98..0902d7034b46a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll index 229c06999388c..5f9dc1ae273bf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -67,8 +65,6 @@ define @vfmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -123,8 +119,6 @@ define @vfmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -179,8 +173,6 @@ define @vfmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -235,8 +227,6 @@ define @vfmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfmsub_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -291,8 +281,6 @@ define @vfmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -469,8 +457,6 @@ define @vfmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfmsub_vv_nxv1f32( %va, %vb, 
%vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -495,8 +481,6 @@ define @vfmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -521,8 +505,6 @@ define @vfmsub_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -547,8 +529,6 @@ define @vfmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -573,8 +553,6 @@ define @vfmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -600,8 +578,6 @@ define @vfmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -626,8 +602,6 @@ define @vfmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -652,8 +626,6 @@ define @vfmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -678,8 +650,6 @@ define @vfmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll index 1f99d0e3a5b4b..4a4079925cb05 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -37,8 +35,6 @@ define @vfmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define @vfmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -89,8 +83,6 @@ define @vfmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -115,8 +107,6 @@ define @vfmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -141,8 +131,6 @@ define @vfmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -168,8 +156,6 @@ define @vfmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -194,8 +180,6 @@ define @vfmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -220,8 +204,6 @@ define @vfmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: 
vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -246,8 +228,6 @@ define @vfmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -272,8 +252,6 @@ define @vfmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -299,8 +277,6 @@ define @vfmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -325,8 +301,6 @@ define @vfmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -351,8 +325,6 @@ define @vfmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -377,8 +349,6 @@ define @vfmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll index 23b4479fa8c94..7b9e6c4f9c02d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.f32( - , - float, - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; 
CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll index 44bce723c39d4..609ef8fb149b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -555,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -579,13 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll index 990d3d4e227df..7640cd565e448 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll @@ -267,7 +267,6 @@ define @vfmul_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f16(, , metadata, metadata) define @vfmul_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -315,7 +314,6 @@ define @vfmul_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f16(, , metadata, metadata) define @vfmul_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -363,7 +361,6 @@ define @vfmul_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f16(, , metadata, metadata) define @vfmul_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -411,7 +408,6 @@ define @vfmul_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f16(, , metadata, metadata) define @vfmul_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -459,7 +455,6 @@ define @vfmul_vf_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv16f16(, , metadata, metadata) define @vfmul_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -507,7 +502,6 @@ define @vfmul_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv32f16(, , metadata, metadata) define @vfmul_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -600,7 +594,6 @@ define @vfmul_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f32(, , metadata, metadata) define @vfmul_vv_nxv1f32( %va, %vb) strictfp { ; 
CHECK-LABEL: vfmul_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -624,7 +617,6 @@ define @vfmul_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f32(, , metadata, metadata) define @vfmul_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -648,7 +640,6 @@ define @vfmul_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f32(, , metadata, metadata) define @vfmul_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -672,7 +663,6 @@ define @vfmul_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f32(, , metadata, metadata) define @vfmul_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -696,7 +686,6 @@ define @vfmul_vf_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv16f32(, , metadata, metadata) define @vfmul_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -720,7 +709,6 @@ define @vfmul_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f64(, , metadata, metadata) define @vfmul_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -744,7 +732,6 @@ define @vfmul_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f64(, , metadata, metadata) define @vfmul_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -768,7 +755,6 @@ define @vfmul_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f64(, , metadata, metadata) define @vfmul_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -792,7 +778,6 @@ define @vfmul_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f64(, , metadata, metadata) define 
@vfmul_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll index 5c8e499d2f5e1..eb77b4b4dbac3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fmul.nxv1f16(, , , i32) - define @vfmul_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -102,8 +100,6 @@ define @vfmul_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv2f16(, , , i32) - define @vfmul_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -196,8 +192,6 @@ define @vfmul_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv4f16(, , , i32) - define @vfmul_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -290,8 +284,6 @@ define @vfmul_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv8f16(, , , i32) - define @vfmul_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -384,8 +376,6 @@ define @vfmul_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv16f16(, , , i32) - define @vfmul_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -478,8 +468,6 @@ define @vfmul_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fmul.nxv32f16(, , , i32) - define @vfmul_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -735,8 +723,6 @@ define @vfmul_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fmul.nxv1f32(, , , i32) - define @vfmul_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f32: ; CHECK: # %bb.0: @@ 
-781,8 +767,6 @@ define @vfmul_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv2f32(, , , i32) - define @vfmul_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f32: ; CHECK: # %bb.0: @@ -827,8 +811,6 @@ define @vfmul_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv4f32(, , , i32) - define @vfmul_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f32: ; CHECK: # %bb.0: @@ -873,8 +855,6 @@ define @vfmul_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv8f32(, , , i32) - define @vfmul_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f32: ; CHECK: # %bb.0: @@ -919,8 +899,6 @@ define @vfmul_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv16f32(, , , i32) - define @vfmul_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv16f32: ; CHECK: # %bb.0: @@ -965,8 +943,6 @@ define @vfmul_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fmul.nxv1f64(, , , i32) - define @vfmul_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1011,8 +987,6 @@ define @vfmul_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv2f64(, , , i32) - define @vfmul_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1057,8 +1031,6 @@ define @vfmul_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv4f64(, , , i32) - define @vfmul_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1103,8 +1075,6 @@ define @vfmul_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv7f64(, , , i32) - define @vfmul_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1115,8 +1085,6 @@ define @vfmul_vv_nxv7f64( %va, %v } -declare @llvm.vp.fmul.nxv8f64(, , , i32) - define @vfmul_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f64: ; CHECK: # %bb.0: diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll index 86c0ee0c629f8..8e8f2de3bb5eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -757,12 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -831,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,12 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -907,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -931,13 +691,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -957,12 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -981,13 +728,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1007,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1031,13 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1057,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1081,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1107,12 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1181,13 +876,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1207,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +913,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1257,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1281,13 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1307,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define 
@intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1357,12 +1006,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1381,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1407,12 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1431,13 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1457,12 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1481,13 +1098,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll index 871eb844ec2d0..03de2c97e685c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmuladd.nxv1f16(, , , , i32) - define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f16: ; CHECK: # %bb.0: @@ -75,8 +73,6 @@ define @vfma_vf_nxv1f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv2f16(, , , , i32) - define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f16: ; CHECK: # %bb.0: @@ -146,8 +142,6 @@ define @vfma_vf_nxv2f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv4f16(, , , , i32) - define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f16: ; CHECK: # %bb.0: @@ -217,8 +211,6 @@ define @vfma_vf_nxv4f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv8f16(, , , , i32) - define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f16: ; CHECK: # %bb.0: @@ -288,8 +280,6 @@ define @vfma_vf_nxv8f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv16f16(, , , , i32) - define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f16: ; CHECK: # %bb.0: @@ -359,8 +349,6 @@ define @vfma_vf_nxv16f16_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv32f16(, , , , i32) - define @vfma_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv32f16: ; CHECK: # %bb.0: @@ -432,8 +420,6 @@ define 
@vfma_vf_nxv32f16_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv1f32(, , , , i32) - define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32: ; CHECK: # %bb.0: @@ -503,8 +489,6 @@ define @vfma_vf_nxv1f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv2f32(, , , , i32) - define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32: ; CHECK: # %bb.0: @@ -574,8 +558,6 @@ define @vfma_vf_nxv2f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv4f32(, , , , i32) - define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32: ; CHECK: # %bb.0: @@ -645,8 +627,6 @@ define @vfma_vf_nxv4f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv8f32(, , , , i32) - define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32: ; CHECK: # %bb.0: @@ -716,8 +696,6 @@ define @vfma_vf_nxv8f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv16f32(, , , , i32) - define @vfma_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f32: ; CHECK: # %bb.0: @@ -789,8 +767,6 @@ define @vfma_vf_nxv16f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv1f64(, , , , i32) - define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64: ; CHECK: # %bb.0: @@ -860,8 +836,6 @@ define @vfma_vf_nxv1f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv2f64(, , , , i32) - define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64: ; CHECK: # %bb.0: @@ -931,8 +905,6 @@ define @vfma_vf_nxv2f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv4f64(, , , , i32) - define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1002,8 +974,6 @@ define @vfma_vf_nxv4f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv7f64(, , , , i32) - define @vfma_vv_nxv7f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfma_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1027,8 +997,6 @@ define @vfma_vv_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.fmuladd.nxv8f64(, , , , i32) - define @vfma_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1100,8 +1068,6 @@ define @vfma_vf_nxv8f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv16f64(, , , , i32) - define @vfma_vv_nxv16f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f64: ; CHECK: # %bb.0: @@ -1297,8 +1263,6 @@ define @vfma_vv_nxv16f64_unmasked( ret %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -1688,8 +1652,6 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -2079,8 +2041,6 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -2470,8 +2430,6 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2861,8 +2819,6 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - define @vfmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -3252,8 +3208,6 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfmsub_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3653,8 +3607,6 @@ define 
@vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -4044,8 +3996,6 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -4435,8 +4385,6 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -4826,8 +4774,6 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f32(, , i32) - define @vfmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -5217,8 +5163,6 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -5618,8 +5562,6 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -6009,8 +5951,6 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -6400,8 +6340,6 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -6791,8 +6729,6 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %v } -declare 
@llvm.vp.fneg.nxv8f64(, , i32) - define @vfmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll index 7a63a4710c534..dd1a28d4c59ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s -declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -15,8 +13,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv2bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -28,8 +24,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -41,8 +35,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -54,8 +46,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -67,8 +57,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll index a810809fca515..564160d11ddeb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vfmv.s.f.nxv1bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv32bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll index f3293ddc83ef9..ce856da16ccb7 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv32bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll index 3779b0ab18d8a..0819ec66f8f78 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zvfh -target-abi 
lp64d -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+zvfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.riscv.vfmv.f.s.nxv1f16() - define half @intrinsic_vfmv.f.s_s_nxv1f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -15,8 +13,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv2f16() - define half @intrinsic_vfmv.f.s_s_nxv2f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -28,8 +24,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv4f16() - define half @intrinsic_vfmv.f.s_s_nxv4f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -41,8 +35,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv8f16() - define half @intrinsic_vfmv.f.s_s_nxv8f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -54,8 +46,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv16f16() - define half @intrinsic_vfmv.f.s_s_nxv16f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -67,8 +57,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv32f16() - define half @intrinsic_vfmv.f.s_s_nxv32f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -80,8 +68,6 @@ entry: ret half %a } -declare float @llvm.riscv.vfmv.f.s.nxv1f32() - define float @intrinsic_vfmv.f.s_s_nxv1f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -93,8 +79,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv2f32() - define float @intrinsic_vfmv.f.s_s_nxv2f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -106,8 +90,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv4f32() - define float @intrinsic_vfmv.f.s_s_nxv4f32( %0) 
nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -119,8 +101,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv8f32() - define float @intrinsic_vfmv.f.s_s_nxv8f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -132,8 +112,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv16f32() - define float @intrinsic_vfmv.f.s_s_nxv16f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -145,8 +123,6 @@ entry: ret float %a } -declare double @llvm.riscv.vfmv.f.s.nxv1f64() - define double @intrinsic_vfmv.f.s_s_nxv1f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -158,8 +134,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv2f64() - define double @intrinsic_vfmv.f.s_s_nxv2f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -171,8 +145,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv4f64() - define double @intrinsic_vfmv.f.s_s_nxv4f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -184,8 +156,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv8f64() - define double @intrinsic_vfmv.f.s_s_nxv8f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll index 912dfe499016f..0e8ecc251d48f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmv.s.f.nxv8f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll index 83d87a7a74b96..f7da7ac6db65b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmv.v.f.nxv2f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv32f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f32( - , - float, - iXLen); - define 
@intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll index b1fd225d37aa9..9f74f5570e434 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll @@ -7,10 +7,6 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen, iXLen); define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: @@ -30,12 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -54,11 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -77,12 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -101,11 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -124,12 +98,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -148,11 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -171,12 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -195,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -218,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -242,11 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -265,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -289,11 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -312,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -336,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -359,12 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -383,11 +296,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -406,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll index 9d74d6b85772a..6f793f371292b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - 
, - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll index 19740af4ebe0a..4a1fbd05028b4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: ; 
CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll index 7d587fd55cd83..ea2e5ffe4cba0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll index f0c2509371df8..b6063b0ac9759 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll index ee9e3d1b9f630..1b14b87114a86 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16( - , - , - iXLen); - define 
@intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll index a71af7fe9e64a..4920539bb2ac1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; 
CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -641,13 
+468,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll index 521f7274dc5c9..fac3724dfdc92 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( - , - , - iXLen); - define 
@intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll index c92909eb587e9..c5dd8af11b79c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: ; 
CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll index ab9ebade287e6..a8e437a3e27d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( - , - , - iXLen, 
iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll index ee51b752b85f4..31adaa34d8588 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -427,11 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,12 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -474,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,12 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: 
# %bb.0: # %entry @@ -521,11 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -544,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -568,11 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -591,12 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -615,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -638,12 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -662,11 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: ; CHECK: # 
%bb.0: # %entry @@ -685,12 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll index 61c6803ce12bd..c9acf9c524537 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll index 1035ec9f643d5..92cbf536b23da 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -427,11 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,12 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -474,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,12 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -521,11 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -544,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -568,11 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -591,12 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - , 
- , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -615,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -638,12 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -662,11 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -685,12 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll index 5eae28aeac882..42669083a33ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv1bf16_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfncvtbf16_mask_f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll index 9bd24c44b1b90..96fbe3f6ff025 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll @@ -300,8 +300,6 @@ define @vfneg_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfneg_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -350,8 +348,6 @@ define @vfneg_vv_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfneg_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -400,8 +396,6 @@ define @vfneg_vv_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfneg_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -450,8 +444,6 @@ define @vfneg_vv_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfneg_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -500,8 +492,6 @@ define @vfneg_vv_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - 
define @vfneg_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -550,8 +540,6 @@ define @vfneg_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfneg_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -600,8 +588,6 @@ define @vfneg_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfneg_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f32: ; CHECK: # %bb.0: @@ -622,8 +608,6 @@ define @vfneg_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfneg_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f32: ; CHECK: # %bb.0: @@ -644,8 +628,6 @@ define @vfneg_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfneg_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv4f32: ; CHECK: # %bb.0: @@ -666,8 +648,6 @@ define @vfneg_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv8f32(, , i32) - define @vfneg_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f32: ; CHECK: # %bb.0: @@ -688,8 +668,6 @@ define @vfneg_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfneg_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f32: ; CHECK: # %bb.0: @@ -710,8 +688,6 @@ define @vfneg_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfneg_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f64: ; CHECK: # %bb.0: @@ -732,8 +708,6 @@ define @vfneg_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfneg_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f64: ; CHECK: # %bb.0: @@ -754,8 +728,6 @@ define @vfneg_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfneg_vv_nxv4f64( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vfneg_vv_nxv4f64: ; CHECK: # %bb.0: @@ -776,8 +748,6 @@ define @vfneg_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv7f64(, , i32) - define @vfneg_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv7f64: ; CHECK: # %bb.0: @@ -798,8 +768,6 @@ define @vfneg_vv_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv8f64(, , i32) - define @vfneg_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f64: ; CHECK: # %bb.0: @@ -821,7 +789,6 @@ define @vfneg_vv_nxv8f64_unmasked( %v } ; Test splitting. -declare @llvm.vp.fneg.nxv16f64(, , i32) define @vfneg_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll index 4b4091ba7acbe..373c29721ce92 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ 
-78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( - , - , - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, 
bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll index 3b5cbb685a424..7f6fb030b13be 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfnmacc_vv_nxv1f16( %a, %b, %c, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f16: ; CHECK: # %bb.0: @@ -131,11 +126,6 @@ define @vfnmacc_vf_nxv1f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfnmacc_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f16: ; CHECK: # %bb.0: @@ -258,11 +248,6 @@ define @vfnmacc_vf_nxv2f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfnmacc_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f16: ; CHECK: # %bb.0: @@ -385,11 +370,6 @@ define @vfnmacc_vf_nxv4f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfnmacc_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f16: ; CHECK: # %bb.0: @@ -512,11 +492,6 @@ define @vfnmacc_vf_nxv8f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vfnmacc_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv16f16: ; CHECK: # %bb.0: @@ -639,11 +614,6 @@ define @vfnmacc_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfnmacc_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv32f16: ; CHECK: # %bb.0: @@ -769,11 +739,6 @@ define @vfnmacc_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , 
i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfnmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f32: ; CHECK: # %bb.0: @@ -896,11 +861,6 @@ define @vfnmacc_vf_nxv1f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfnmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1023,11 +983,6 @@ define @vfnmacc_vf_nxv2f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfnmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1150,11 +1105,6 @@ define @vfnmacc_vf_nxv4f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfnmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1277,11 +1227,6 @@ define @vfnmacc_vf_nxv8f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfnmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1407,11 +1352,6 @@ define @vfnmacc_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfnmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1534,11 +1474,6 @@ define @vfnmacc_vf_nxv1f64_commute_ta( %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfnmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1661,11 +1596,6 @@ define @vfnmacc_vf_nxv2f64_commute_ta( %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfnmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1788,11 +1718,6 @@ define @vfnmacc_vf_nxv4f64_commute_ta( %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfnmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll index 31df27853cb3c..5c2ebce184d6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll index 2bb6bf5ae9e26..66b347d4b661e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( - , - , - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # 
%bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll index d774289e3eebb..16ff3b719a927 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -71,8 +69,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -131,8 +127,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -191,8 +185,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -251,8 +243,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -311,8 +301,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -500,8 +488,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -528,8 +514,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -556,8 +540,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare 
@llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -584,8 +566,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -641,8 +619,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -669,8 +645,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -697,8 +671,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -725,8 +697,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll index 07c85bc67339b..b0f5599cad740 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand 
orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -35,8 +33,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -91,8 +85,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -119,8 +111,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -147,8 +137,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -176,8 +164,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -204,8 +190,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -232,8 +216,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -260,8 +242,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -288,8 +268,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - 
define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -317,8 +295,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -345,8 +321,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -373,8 +347,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -401,8 +373,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll index 6f41ed177beac..55f6aa2e2eb38 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: 
; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # 
%entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll index cfbaafa00c043..221df7095e8c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry 
@@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( - , 
- bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll index edeb554bc6d35..37b223be1150c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfnmsac_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define @vfnmsac_vf_nxv1f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfnmsac_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define @vfnmsac_vf_nxv2f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare 
@llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfnmsac_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define @vfnmsac_vf_nxv4f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfnmsac_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define @vfnmsac_vf_nxv8f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vfnmsac_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define @vfnmsac_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfnmsac_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv32f16: ; CHECK: # %bb.0: @@ -721,11 +691,6 @@ define @vfnmsac_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfnmsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f32: ; CHECK: # %bb.0: @@ -840,11 +805,6 @@ define @vfnmsac_vf_nxv1f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfnmsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmsac_vv_nxv2f32: ; CHECK: # %bb.0: @@ -959,11 +919,6 @@ define @vfnmsac_vf_nxv2f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfnmsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1078,11 +1033,6 @@ define @vfnmsac_vf_nxv4f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfnmsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1197,11 +1147,6 @@ define @vfnmsac_vf_nxv8f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfnmsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1319,11 +1264,6 @@ define @vfnmsac_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfnmsac_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1438,11 +1378,6 @@ define @vfnmsac_vf_nxv1f64_commute_ta( %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfnmsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1557,11 +1492,6 @@ define @vfnmsac_vf_nxv2f64_commute_ta( %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) 
-declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfnmsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1676,11 +1606,6 @@ define @vfnmsac_vf_nxv4f64_commute_ta( %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfnmsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll index 50497d92764a5..f874e05465a09 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 
+189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # 
%bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll index 5ebbb90c4c5a2..d400de99b49f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 
+115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2bf16.bf16( - , - bfloat, - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll index 96c28e4c6e0e2..68af72da4126f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -67,8 +65,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -123,8 +119,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -179,8 +173,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -235,8 +227,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) strictfp 
{ ; ZVFH-LABEL: vfnmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -291,8 +281,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -446,8 +434,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -472,8 +458,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -498,8 +482,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -524,8 +506,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -550,8 +530,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -577,8 +555,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -603,8 +579,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: 
vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -629,8 +603,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -655,8 +627,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll index a356da80e1639..c78dfb26d53d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -137,8 +127,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; 
CHECK: # %bb.0: @@ -164,8 +152,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -190,8 +176,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -242,8 +224,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -268,8 +248,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -321,8 +297,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -347,8 +321,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -373,8 +345,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll index c5d5bb1fe0b3e..263b0161c04fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' 
%s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - , - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll index 9c77a6818bcb2..5ee8876ce73ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fpext.nxv1f32.nxv1f16(, metadata) define @vfpext_nxv1f16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f16_nxv1f32: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define @vfpext_nxv1f16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1f16(, metadata) define @vfpext_nxv1f16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f16_nxv1f64: ; CHECK: # %bb.0: @@ -29,7 +27,6 @@ define @vfpext_nxv1f16_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f32.nxv2f16(, metadata) define @vfpext_nxv2f16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32: ; 
CHECK: # %bb.0: @@ -41,7 +38,6 @@ define @vfpext_nxv2f16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2f16(, metadata) define @vfpext_nxv2f16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define @vfpext_nxv2f16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f32.nxv4f16(, metadata) define @vfpext_nxv4f16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f16_nxv4f32: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define @vfpext_nxv4f16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4f16(, metadata) define @vfpext_nxv4f16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f16_nxv4f64: ; CHECK: # %bb.0: @@ -79,7 +73,6 @@ define @vfpext_nxv4f16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f32.nxv8f16(, metadata) define @vfpext_nxv8f16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f16_nxv8f32: ; CHECK: # %bb.0: @@ -91,7 +84,6 @@ define @vfpext_nxv8f16_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8f16(, metadata) define @vfpext_nxv8f16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f16_nxv8f64: ; CHECK: # %bb.0: @@ -104,7 +96,6 @@ define @vfpext_nxv8f16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1f32(, metadata) define @vfpext_nxv1f32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f32_nxv1f64: ; CHECK: # %bb.0: @@ -116,7 +107,6 @@ define @vfpext_nxv1f32_nxv1f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2f32(, metadata) define @vfpext_nxv2f32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -128,7 +118,6 @@ define @vfpext_nxv2f32_nxv2f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4f32(, metadata) define @vfpext_nxv4f32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f32_nxv4f64: ; 
CHECK: # %bb.0: @@ -140,7 +129,6 @@ define @vfpext_nxv4f32_nxv4f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8f32(, metadata) define @vfpext_nxv8f32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f32_nxv8f64: ; CHECK: # %bb.0: @@ -152,7 +140,6 @@ define @vfpext_nxv8f32_nxv8f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f32.nxv1bf16(, metadata) define @vfpext_nxv1bf16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: @@ -164,7 +151,6 @@ define @vfpext_nxv1bf16_nxv1f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1bf16(, metadata) define @vfpext_nxv1bf16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1bf16_nxv1f64: ; CHECK: # %bb.0: @@ -177,7 +163,6 @@ define @vfpext_nxv1bf16_nxv1f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f32.nxv2bf16(, metadata) define @vfpext_nxv2bf16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -189,7 +174,6 @@ define @vfpext_nxv2bf16_nxv2f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2bf16(, metadata) define @vfpext_nxv2bf16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: @@ -202,7 +186,6 @@ define @vfpext_nxv2bf16_nxv2f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f32.nxv4bf16(, metadata) define @vfpext_nxv4bf16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: @@ -214,7 +197,6 @@ define @vfpext_nxv4bf16_nxv4f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4bf16(, metadata) define @vfpext_nxv4bf16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4bf16_nxv4f64: ; CHECK: # %bb.0: @@ -227,7 +209,6 @@ define @vfpext_nxv4bf16_nxv4f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f32.nxv8bf16(, metadata) define @vfpext_nxv8bf16_nxv8f32( %va) strictfp { ; CHECK-LABEL: 
vfpext_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: @@ -239,7 +220,6 @@ define @vfpext_nxv8bf16_nxv8f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8bf16(, metadata) define @vfpext_nxv8bf16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8bf16_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll index 137b616d86fcc..458795db7965d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfpext_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vfpext_nxv2f16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfpext_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @vfpext_nxv2f16_nxv2f64_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfpext_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define @vfpext_nxv2f32_nxv2f64_unmasked( %v } -declare @llvm.vp.fpext.nxv7f64.nxv7f32(, , i32) - define @vfpext_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv7f32_nxv7f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define @vfpext_nxv7f32_nxv7f64( %a, %v } -declare @llvm.vp.fpext.nxv32f32.nxv32f16(, , i32) - define @vfpext_nxv32f16_nxv32f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv32f16_nxv32f32: ; CHECK: # %bb.0: @@ -121,8 +111,6 @@ define @vfpext_nxv32f16_nxv32f32( %a, ret %v } -declare @llvm.vp.fpext.nxv2f32.nxv2bf16(, , i32) - define @vfpext_nxv2bf16_nxv2f32( %a, 
%m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -145,8 +133,6 @@ define @vfpext_nxv2bf16_nxv2f32_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2bf16(, , i32) - define @vfpext_nxv2bf16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll index 9e7f4ede29f54..efcdc1e24b0b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define @vfptosi_nxv1f16_nxv1i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define @vfptoui_nxv1f16_nxv1i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i7( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7: ; CHECK: # %bb.0: @@ -42,7 +39,6 @@ define @vfptosi_nxv1f16_nxv1i7( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i7( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define @vfptoui_nxv1f16_nxv1i7( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define @vfptosi_nxv1f16_nxv1i8( %va) strict 
ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8: ; CHECK: # %bb.0: @@ -78,7 +72,6 @@ define @vfptoui_nxv1f16_nxv1i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16: ; CHECK: # %bb.0: @@ -89,7 +82,6 @@ define @vfptosi_nxv1f16_nxv1i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16: ; CHECK: # %bb.0: @@ -100,7 +92,6 @@ define @vfptoui_nxv1f16_nxv1i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32: ; CHECK: # %bb.0: @@ -112,7 +103,6 @@ define @vfptosi_nxv1f16_nxv1i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32: ; CHECK: # %bb.0: @@ -124,7 +114,6 @@ define @vfptoui_nxv1f16_nxv1i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64: ; CHECK: # %bb.0: @@ -137,7 +126,6 @@ define @vfptosi_nxv1f16_nxv1i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64: ; CHECK: # %bb.0: @@ -150,7 +138,6 @@ define @vfptoui_nxv1f16_nxv1i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -163,7 +150,6 
@@ define @vfptosi_nxv2f16_nxv2i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -176,7 +162,6 @@ define @vfptoui_nxv2f16_nxv2i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8: ; CHECK: # %bb.0: @@ -188,7 +173,6 @@ define @vfptosi_nxv2f16_nxv2i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8: ; CHECK: # %bb.0: @@ -200,7 +184,6 @@ define @vfptoui_nxv2f16_nxv2i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16: ; CHECK: # %bb.0: @@ -211,7 +194,6 @@ define @vfptosi_nxv2f16_nxv2i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16: ; CHECK: # %bb.0: @@ -222,7 +204,6 @@ define @vfptoui_nxv2f16_nxv2i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32: ; CHECK: # %bb.0: @@ -234,7 +215,6 @@ define @vfptosi_nxv2f16_nxv2i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32: ; CHECK: # %bb.0: @@ -246,7 +226,6 @@ define @vfptoui_nxv2f16_nxv2i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i64( %va) strictfp { ; CHECK-LABEL: 
vfptosi_nxv2f16_nxv2i64: ; CHECK: # %bb.0: @@ -259,7 +238,6 @@ define @vfptosi_nxv2f16_nxv2i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64: ; CHECK: # %bb.0: @@ -272,7 +250,6 @@ define @vfptoui_nxv2f16_nxv2i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1: ; CHECK: # %bb.0: @@ -285,7 +262,6 @@ define @vfptosi_nxv4f16_nxv4i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1: ; CHECK: # %bb.0: @@ -298,7 +274,6 @@ define @vfptoui_nxv4f16_nxv4i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8: ; CHECK: # %bb.0: @@ -310,7 +285,6 @@ define @vfptosi_nxv4f16_nxv4i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8: ; CHECK: # %bb.0: @@ -322,7 +296,6 @@ define @vfptoui_nxv4f16_nxv4i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16: ; CHECK: # %bb.0: @@ -333,7 +306,6 @@ define @vfptosi_nxv4f16_nxv4i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16: ; CHECK: # %bb.0: @@ -344,7 +316,6 @@ define @vfptoui_nxv4f16_nxv4i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(, metadata) define 
@vfptosi_nxv4f16_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32: ; CHECK: # %bb.0: @@ -356,7 +327,6 @@ define @vfptosi_nxv4f16_nxv4i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32: ; CHECK: # %bb.0: @@ -368,7 +338,6 @@ define @vfptoui_nxv4f16_nxv4i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64: ; CHECK: # %bb.0: @@ -381,7 +350,6 @@ define @vfptosi_nxv4f16_nxv4i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64: ; CHECK: # %bb.0: @@ -394,7 +362,6 @@ define @vfptoui_nxv4f16_nxv4i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1: ; CHECK: # %bb.0: @@ -407,7 +374,6 @@ define @vfptosi_nxv8f16_nxv8i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1: ; CHECK: # %bb.0: @@ -420,7 +386,6 @@ define @vfptoui_nxv8f16_nxv8i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8: ; CHECK: # %bb.0: @@ -432,7 +397,6 @@ define @vfptosi_nxv8f16_nxv8i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8: ; CHECK: # %bb.0: @@ -444,7 +408,6 @@ define @vfptoui_nxv8f16_nxv8i8( %va) strict ret %evec } -declare 
@llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16: ; CHECK: # %bb.0: @@ -455,7 +418,6 @@ define @vfptosi_nxv8f16_nxv8i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16: ; CHECK: # %bb.0: @@ -466,7 +428,6 @@ define @vfptoui_nxv8f16_nxv8i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32: ; CHECK: # %bb.0: @@ -478,7 +439,6 @@ define @vfptosi_nxv8f16_nxv8i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32: ; CHECK: # %bb.0: @@ -490,7 +450,6 @@ define @vfptoui_nxv8f16_nxv8i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64: ; CHECK: # %bb.0: @@ -503,7 +462,6 @@ define @vfptosi_nxv8f16_nxv8i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64: ; CHECK: # %bb.0: @@ -516,7 +474,6 @@ define @vfptoui_nxv8f16_nxv8i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1: ; CHECK: # %bb.0: @@ -529,7 +486,6 @@ define @vfptosi_nxv16f16_nxv16i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1: ; CHECK: # %bb.0: @@ -542,7 +498,6 @@ 
define @vfptoui_nxv16f16_nxv16i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8: ; CHECK: # %bb.0: @@ -554,7 +509,6 @@ define @vfptosi_nxv16f16_nxv16i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8: ; CHECK: # %bb.0: @@ -566,7 +520,6 @@ define @vfptoui_nxv16f16_nxv16i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16: ; CHECK: # %bb.0: @@ -577,7 +530,6 @@ define @vfptosi_nxv16f16_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16: ; CHECK: # %bb.0: @@ -588,7 +540,6 @@ define @vfptoui_nxv16f16_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32: ; CHECK: # %bb.0: @@ -600,7 +551,6 @@ define @vfptosi_nxv16f16_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32: ; CHECK: # %bb.0: @@ -612,7 +562,6 @@ define @vfptoui_nxv16f16_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1: ; CHECK: # %bb.0: @@ -625,7 +574,6 @@ define @vfptosi_nxv32f16_nxv32i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i1( %va) strictfp { ; 
CHECK-LABEL: vfptoui_nxv32f16_nxv32i1: ; CHECK: # %bb.0: @@ -638,7 +586,6 @@ define @vfptoui_nxv32f16_nxv32i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8: ; CHECK: # %bb.0: @@ -650,7 +597,6 @@ define @vfptosi_nxv32f16_nxv32i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8: ; CHECK: # %bb.0: @@ -662,7 +608,6 @@ define @vfptoui_nxv32f16_nxv32i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16: ; CHECK: # %bb.0: @@ -673,7 +618,6 @@ define @vfptosi_nxv32f16_nxv32i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16: ; CHECK: # %bb.0: @@ -684,7 +628,6 @@ define @vfptoui_nxv32f16_nxv32i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1: ; CHECK: # %bb.0: @@ -697,7 +640,6 @@ define @vfptosi_nxv1f32_nxv1i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1: ; CHECK: # %bb.0: @@ -710,7 +652,6 @@ define @vfptoui_nxv1f32_nxv1i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8: ; CHECK: # %bb.0: @@ -723,7 +664,6 @@ define @vfptosi_nxv1f32_nxv1i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(, 
metadata) define @vfptoui_nxv1f32_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8: ; CHECK: # %bb.0: @@ -736,7 +676,6 @@ define @vfptoui_nxv1f32_nxv1i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16: ; CHECK: # %bb.0: @@ -748,7 +687,6 @@ define @vfptosi_nxv1f32_nxv1i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16: ; CHECK: # %bb.0: @@ -760,7 +698,6 @@ define @vfptoui_nxv1f32_nxv1i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32: ; CHECK: # %bb.0: @@ -771,7 +708,6 @@ define @vfptosi_nxv1f32_nxv1i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32: ; CHECK: # %bb.0: @@ -782,7 +718,6 @@ define @vfptoui_nxv1f32_nxv1i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64: ; CHECK: # %bb.0: @@ -794,7 +729,6 @@ define @vfptosi_nxv1f32_nxv1i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64: ; CHECK: # %bb.0: @@ -806,7 +740,6 @@ define @vfptoui_nxv1f32_nxv1i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -819,7 +752,6 @@ define @vfptosi_nxv2f32_nxv2i1( %va) stric ret %evec } -declare 
@llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -832,7 +764,6 @@ define @vfptoui_nxv2f32_nxv2i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -845,7 +776,6 @@ define @vfptosi_nxv2f32_nxv2i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -858,7 +788,6 @@ define @vfptoui_nxv2f32_nxv2i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -870,7 +799,6 @@ define @vfptosi_nxv2f32_nxv2i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -882,7 +810,6 @@ define @vfptoui_nxv2f32_nxv2i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -893,7 +820,6 @@ define @vfptosi_nxv2f32_nxv2i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -904,7 +830,6 @@ define @vfptoui_nxv2f32_nxv2i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -916,7 +841,6 @@ define 
@vfptosi_nxv2f32_nxv2i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -928,7 +852,6 @@ define @vfptoui_nxv2f32_nxv2i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1: ; CHECK: # %bb.0: @@ -941,7 +864,6 @@ define @vfptosi_nxv4f32_nxv4i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1: ; CHECK: # %bb.0: @@ -954,7 +876,6 @@ define @vfptoui_nxv4f32_nxv4i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8: ; CHECK: # %bb.0: @@ -967,7 +888,6 @@ define @vfptosi_nxv4f32_nxv4i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8: ; CHECK: # %bb.0: @@ -980,7 +900,6 @@ define @vfptoui_nxv4f32_nxv4i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16: ; CHECK: # %bb.0: @@ -992,7 +911,6 @@ define @vfptosi_nxv4f32_nxv4i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16: ; CHECK: # %bb.0: @@ -1004,7 +922,6 @@ define @vfptoui_nxv4f32_nxv4i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32: ; 
CHECK: # %bb.0: @@ -1015,7 +932,6 @@ define @vfptosi_nxv4f32_nxv4i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32: ; CHECK: # %bb.0: @@ -1026,7 +942,6 @@ define @vfptoui_nxv4f32_nxv4i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64: ; CHECK: # %bb.0: @@ -1038,7 +953,6 @@ define @vfptosi_nxv4f32_nxv4i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64: ; CHECK: # %bb.0: @@ -1050,7 +964,6 @@ define @vfptoui_nxv4f32_nxv4i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1: ; CHECK: # %bb.0: @@ -1063,7 +976,6 @@ define @vfptosi_nxv8f32_nxv8i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1: ; CHECK: # %bb.0: @@ -1076,7 +988,6 @@ define @vfptoui_nxv8f32_nxv8i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8: ; CHECK: # %bb.0: @@ -1089,7 +1000,6 @@ define @vfptosi_nxv8f32_nxv8i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8: ; CHECK: # %bb.0: @@ -1102,7 +1012,6 @@ define @vfptoui_nxv8f32_nxv8i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i16( %va) 
strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16: ; CHECK: # %bb.0: @@ -1114,7 +1023,6 @@ define @vfptosi_nxv8f32_nxv8i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16: ; CHECK: # %bb.0: @@ -1126,7 +1034,6 @@ define @vfptoui_nxv8f32_nxv8i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32: ; CHECK: # %bb.0: @@ -1137,7 +1044,6 @@ define @vfptosi_nxv8f32_nxv8i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32: ; CHECK: # %bb.0: @@ -1148,7 +1054,6 @@ define @vfptoui_nxv8f32_nxv8i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64: ; CHECK: # %bb.0: @@ -1160,7 +1065,6 @@ define @vfptosi_nxv8f32_nxv8i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64: ; CHECK: # %bb.0: @@ -1172,7 +1076,6 @@ define @vfptoui_nxv8f32_nxv8i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1: ; CHECK: # %bb.0: @@ -1185,7 +1088,6 @@ define @vfptosi_nxv16f32_nxv16i1( %va) s ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1: ; CHECK: # %bb.0: @@ -1198,7 +1100,6 @@ define @vfptoui_nxv16f32_nxv16i1( %va) s ret %evec } -declare 
@llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8: ; CHECK: # %bb.0: @@ -1211,7 +1112,6 @@ define @vfptosi_nxv16f32_nxv16i8( %va) s ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8: ; CHECK: # %bb.0: @@ -1224,7 +1124,6 @@ define @vfptoui_nxv16f32_nxv16i8( %va) s ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16: ; CHECK: # %bb.0: @@ -1236,7 +1135,6 @@ define @vfptosi_nxv16f32_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16: ; CHECK: # %bb.0: @@ -1248,7 +1146,6 @@ define @vfptoui_nxv16f32_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32: ; CHECK: # %bb.0: @@ -1259,7 +1156,6 @@ define @vfptosi_nxv16f32_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32: ; CHECK: # %bb.0: @@ -1270,7 +1166,6 @@ define @vfptoui_nxv16f32_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1: ; CHECK: # %bb.0: @@ -1283,7 +1178,6 @@ define @vfptosi_nxv1f64_nxv1i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1: ; CHECK: # %bb.0: @@ 
-1296,7 +1190,6 @@ define @vfptoui_nxv1f64_nxv1i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8: ; CHECK: # %bb.0: @@ -1311,7 +1204,6 @@ define @vfptosi_nxv1f64_nxv1i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8: ; CHECK: # %bb.0: @@ -1326,7 +1218,6 @@ define @vfptoui_nxv1f64_nxv1i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i16: ; CHECK: # %bb.0: @@ -1339,7 +1230,6 @@ define @vfptosi_nxv1f64_nxv1i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16: ; CHECK: # %bb.0: @@ -1352,7 +1242,6 @@ define @vfptoui_nxv1f64_nxv1i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32: ; CHECK: # %bb.0: @@ -1364,7 +1253,6 @@ define @vfptosi_nxv1f64_nxv1i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32: ; CHECK: # %bb.0: @@ -1376,7 +1264,6 @@ define @vfptoui_nxv1f64_nxv1i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64: ; CHECK: # %bb.0: @@ -1387,7 +1274,6 @@ define @vfptosi_nxv1f64_nxv1i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i64( %va) strictfp { ; 
CHECK-LABEL: vfptoui_nxv1f64_nxv1i64: ; CHECK: # %bb.0: @@ -1398,7 +1284,6 @@ define @vfptoui_nxv1f64_nxv1i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1: ; CHECK: # %bb.0: @@ -1411,7 +1296,6 @@ define @vfptosi_nxv2f64_nxv2i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1: ; CHECK: # %bb.0: @@ -1424,7 +1308,6 @@ define @vfptoui_nxv2f64_nxv2i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -1439,7 +1322,6 @@ define @vfptosi_nxv2f64_nxv2i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -1454,7 +1336,6 @@ define @vfptoui_nxv2f64_nxv2i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -1467,7 +1348,6 @@ define @vfptosi_nxv2f64_nxv2i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -1480,7 +1360,6 @@ define @vfptoui_nxv2f64_nxv2i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -1492,7 +1371,6 @@ define @vfptosi_nxv2f64_nxv2i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(, metadata) 
define @vfptoui_nxv2f64_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -1504,7 +1382,6 @@ define @vfptoui_nxv2f64_nxv2i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -1515,7 +1392,6 @@ define @vfptosi_nxv2f64_nxv2i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -1526,7 +1402,6 @@ define @vfptoui_nxv2f64_nxv2i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1: ; CHECK: # %bb.0: @@ -1539,7 +1414,6 @@ define @vfptosi_nxv4f64_nxv4i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1: ; CHECK: # %bb.0: @@ -1552,7 +1426,6 @@ define @vfptoui_nxv4f64_nxv4i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8: ; CHECK: # %bb.0: @@ -1567,7 +1440,6 @@ define @vfptosi_nxv4f64_nxv4i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8: ; CHECK: # %bb.0: @@ -1582,7 +1454,6 @@ define @vfptoui_nxv4f64_nxv4i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16: ; CHECK: # %bb.0: @@ -1595,7 +1466,6 @@ define @vfptosi_nxv4f64_nxv4i16( %va) st ret %evec } -declare 
@llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16: ; CHECK: # %bb.0: @@ -1608,7 +1478,6 @@ define @vfptoui_nxv4f64_nxv4i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32: ; CHECK: # %bb.0: @@ -1620,7 +1489,6 @@ define @vfptosi_nxv4f64_nxv4i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32: ; CHECK: # %bb.0: @@ -1632,7 +1500,6 @@ define @vfptoui_nxv4f64_nxv4i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64: ; CHECK: # %bb.0: @@ -1643,7 +1510,6 @@ define @vfptosi_nxv4f64_nxv4i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64: ; CHECK: # %bb.0: @@ -1654,7 +1520,6 @@ define @vfptoui_nxv4f64_nxv4i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1: ; CHECK: # %bb.0: @@ -1667,7 +1532,6 @@ define @vfptosi_nxv8f64_nxv8i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1: ; CHECK: # %bb.0: @@ -1680,7 +1544,6 @@ define @vfptoui_nxv8f64_nxv8i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8: ; CHECK: # %bb.0: @@ -1695,7 +1558,6 @@ define 
@vfptosi_nxv8f64_nxv8i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8: ; CHECK: # %bb.0: @@ -1710,7 +1572,6 @@ define @vfptoui_nxv8f64_nxv8i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16: ; CHECK: # %bb.0: @@ -1723,7 +1584,6 @@ define @vfptosi_nxv8f64_nxv8i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16: ; CHECK: # %bb.0: @@ -1736,7 +1596,6 @@ define @vfptoui_nxv8f64_nxv8i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32: ; CHECK: # %bb.0: @@ -1748,7 +1607,6 @@ define @vfptosi_nxv8f64_nxv8i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32: ; CHECK: # %bb.0: @@ -1760,7 +1618,6 @@ define @vfptoui_nxv8f64_nxv8i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64: ; CHECK: # %bb.0: @@ -1771,7 +1628,6 @@ define @vfptosi_nxv8f64_nxv8i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll index 33decd8aa1b91..7924bc83d824c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll @@ -30,8 +30,6 @@ define @vfptosi_nxv2i1_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f16(, , i32) - define @vfptosi_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i1_nxv2f16: ; ZVFH: # %bb.0: @@ -72,8 +70,6 @@ define @vfptosi_nxv2i1_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f32(, , i32) - define @vfptosi_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vfptosi_nxv2i1_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f64(, , i32) - define @vfptosi_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll index f94f709626443..7127d10e67dbc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -113,8 +113,6 @@ define @vfptosi_nxv2i64_nxv2bf16_unmasked( %v } -declare @llvm.vp.fptosi.v4i7.v4f16(, , i32) - define @vfptosi_v4i7_v4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -135,8 +133,6 @@ define @vfptosi_v4i7_v4f16( %va, %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f16(, , i32) - define @vfptosi_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16: ; ZVFH: # %bb.0: @@ -177,8 +173,6 @@ define @vfptosi_nxv2i8_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f16(, , i32) - define @vfptosi_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16: ; ZVFH: # %bb.0: @@ -213,8 +207,6 @@ define @vfptosi_nxv2i16_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f16(, , i32) - define @vfptosi_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16: ; ZVFH: # %bb.0: @@ -253,8 +245,6 @@ define @vfptosi_nxv2i32_nxv2f16_unmasked( ret 
%v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f16(, , i32) - define @vfptosi_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16: ; ZVFH: # %bb.0: @@ -287,8 +277,6 @@ define @vfptosi_nxv2i64_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f32(, , i32) - define @vfptosi_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -313,8 +301,6 @@ define @vfptosi_nxv2i8_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f32(, , i32) - define @vfptosi_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -337,8 +323,6 @@ define @vfptosi_nxv2i16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f32(, , i32) - define @vfptosi_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -359,8 +343,6 @@ define @vfptosi_nxv2i32_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f32(, , i32) - define @vfptosi_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -383,8 +365,6 @@ define @vfptosi_nxv2i64_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f64(, , i32) - define @vfptosi_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -413,8 +393,6 @@ define @vfptosi_nxv2i8_nxv2f64_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f64(, , i32) - define @vfptosi_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -439,8 +417,6 @@ define @vfptosi_nxv2i16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f64(, , i32) - define @vfptosi_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -463,8 +439,6 @@ define @vfptosi_nxv2i32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f64(, , i32) - define 
@vfptosi_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -485,8 +459,6 @@ define @vfptosi_nxv2i64_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv32i16.nxv32f32(, , i32) - define @vfptosi_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32: ; CHECK: # %bb.0: @@ -515,8 +487,6 @@ define @vfptosi_nxv32i16_nxv32f32( %va, ret %v } -declare @llvm.vp.fptosi.nxv32i32.nxv32f32(, , i32) - define @vfptosi_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll index 59c6791c12f79..dba3f6cec2da0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll @@ -30,8 +30,6 @@ define @vfptoui_nxv2i1_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f16(, , i32) - define @vfptoui_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16: ; ZVFH: # %bb.0: @@ -72,8 +70,6 @@ define @vfptoui_nxv2i1_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f32(, , i32) - define @vfptoui_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vfptoui_nxv2i1_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f64(, , i32) - define @vfptoui_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll index 7aae383049deb..07b58ed057508 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -113,8 +113,6 @@ define @vfptoui_nxv2i64_nxv2bf16_unmasked( %v } -declare @llvm.vp.fptoui.v4i7.v4f16(, , i32) - define @vfptoui_v4i7_v4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfptoui_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -135,8 +133,6 @@ define @vfptoui_v4i7_v4f16( %va, %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f16(, , i32) - define @vfptoui_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16: ; ZVFH: # %bb.0: @@ -177,8 +173,6 @@ define @vfptoui_nxv2i8_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f16(, , i32) - define @vfptoui_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16: ; ZVFH: # %bb.0: @@ -213,8 +207,6 @@ define @vfptoui_nxv2i16_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f16(, , i32) - define @vfptoui_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16: ; ZVFH: # %bb.0: @@ -253,8 +245,6 @@ define @vfptoui_nxv2i32_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f16(, , i32) - define @vfptoui_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16: ; ZVFH: # %bb.0: @@ -287,8 +277,6 @@ define @vfptoui_nxv2i64_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f32(, , i32) - define @vfptoui_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -313,8 +301,6 @@ define @vfptoui_nxv2i8_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f32(, , i32) - define @vfptoui_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -337,8 +323,6 @@ define @vfptoui_nxv2i16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f32(, , i32) - define @vfptoui_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -359,8 +343,6 @@ define @vfptoui_nxv2i32_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f32(, , i32) - define @vfptoui_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -383,8 +365,6 @@ define 
@vfptoui_nxv2i64_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f64(, , i32) - define @vfptoui_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -413,8 +393,6 @@ define @vfptoui_nxv2i8_nxv2f64_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f64(, , i32) - define @vfptoui_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -439,8 +417,6 @@ define @vfptoui_nxv2i16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f64(, , i32) - define @vfptoui_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -463,8 +439,6 @@ define @vfptoui_nxv2i32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f64(, , i32) - define @vfptoui_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -485,8 +459,6 @@ define @vfptoui_nxv2i64_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv32i16.nxv32f32(, , i32) - define @vfptoui_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32: ; CHECK: # %bb.0: @@ -515,8 +487,6 @@ define @vfptoui_nxv32i16_nxv32f32( %va, ret %v } -declare @llvm.vp.fptoui.nxv32i32.nxv32f32(, , i32) - define @vfptoui_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll index 566920d577ce1..b77589762017a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll @@ -8,7 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1f32( %va) 
strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define @vfptrunc_nxv1f64_nxv1f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -33,7 +31,6 @@ define @vfptrunc_nxv1f64_nxv1f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f32(, metadata, metadata) define @vfptrunc_nxv1f32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16: ; CHECK: # %bb.0: @@ -45,7 +42,6 @@ define @vfptrunc_nxv1f32_nxv1f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f32.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32: ; CHECK: # %bb.0: @@ -57,7 +53,6 @@ define @vfptrunc_nxv2f64_nxv2f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -70,7 +65,6 @@ define @vfptrunc_nxv2f64_nxv2f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f32(, metadata, metadata) define @vfptrunc_nxv2f32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define @vfptrunc_nxv2f32_nxv2f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4f32.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32: ; CHECK: # %bb.0: @@ -94,7 +87,6 @@ define @vfptrunc_nxv4f64_nxv4f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -107,7 +99,6 @@ define @vfptrunc_nxv4f64_nxv4f16( %va) ret %evec } 
-declare @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f32(, metadata, metadata) define @vfptrunc_nxv4f32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16: ; CHECK: # %bb.0: @@ -119,7 +110,6 @@ define @vfptrunc_nxv4f32_nxv4f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f32.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32: ; CHECK: # %bb.0: @@ -131,7 +121,6 @@ define @vfptrunc_nxv8f64_nxv8f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16: ; CHECK: # %bb.0: @@ -144,7 +133,6 @@ define @vfptrunc_nxv8f64_nxv8f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f32(, metadata, metadata) define @vfptrunc_nxv8f32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16: ; CHECK: # %bb.0: @@ -156,7 +144,6 @@ define @vfptrunc_nxv8f32_nxv8f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1bf16: ; CHECK: # %bb.0: @@ -169,7 +156,6 @@ define @vfptrunc_nxv1f64_nxv1bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f32(, metadata, metadata) define @vfptrunc_nxv1f32_nxv1bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: @@ -181,7 +167,6 @@ define @vfptrunc_nxv1f32_nxv1bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2bf16: ; CHECK: # %bb.0: @@ -194,7 +179,6 @@ define @vfptrunc_nxv2f64_nxv2bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f32(, metadata, metadata) define @vfptrunc_nxv2f32_nxv2bf16( %va) 
strictfp { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: @@ -206,7 +190,6 @@ define @vfptrunc_nxv2f32_nxv2bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4bf16: ; CHECK: # %bb.0: @@ -219,7 +202,6 @@ define @vfptrunc_nxv4f64_nxv4bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f32(, metadata, metadata) define @vfptrunc_nxv4f32_nxv4bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: @@ -231,7 +213,6 @@ define @vfptrunc_nxv4f32_nxv4bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8bf16: ; CHECK: # %bb.0: @@ -244,7 +225,6 @@ define @vfptrunc_nxv8f64_nxv8bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f32(, metadata, metadata) define @vfptrunc_nxv8f32_nxv8bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll index 03de35c212296..4177672b3a306 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fptrunc.nxv2f16.nxv2f32(, , i32) - define @vfptrunc_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vfptrunc_nxv2f16_nxv2f32_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2f16.nxv2f64(, , i32) - define @vfptrunc_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: 
vfptrunc_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @vfptrunc_nxv2f16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2f64.nxv2f32(, , i32) - define @vfptrunc_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define @vfptrunc_nxv2f32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptrunc.nxv7f64.nxv7f32(, , i32) - define @vfptrunc_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define @vfptrunc_nxv7f32_nxv7f64( %a, ret %v } -declare @llvm.vp.fptrunc.nxv16f64.nxv16f32(, , i32) - define @vfptrunc_nxv16f32_nxv16f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f64: ; CHECK: # %bb.0: @@ -120,8 +110,6 @@ define @vfptrunc_nxv16f32_nxv16f64( ret %v } -declare @llvm.vp.fptrunc.nxv32f64.nxv32f32(, , i32) - define @vfptrunc_nxv32f32_nxv32f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv32f32_nxv32f64: ; CHECK: # %bb.0: @@ -206,8 +194,6 @@ define @vfptrunc_nxv32f32_nxv32f64( ret %v } -declare @llvm.vp.fptrunc.nxv2bf16.nxv2f32(, , i32) - define @vfptrunc_nxv2bf16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -230,8 +216,6 @@ define @vfptrunc_nxv2bf16_nxv2f32_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2bf16.nxv2f64(, , i32) - define @vfptrunc_nxv2bf16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll index e8688abc63a5d..42be051e35aae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fdiv.nxv1f16(, , , i32) - define @vfrdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfrdiv_vf_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define @vfrdiv_vf_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv2f16(, , , i32) - define @vfrdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define @vfrdiv_vf_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv4f16(, , , i32) - define @vfrdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define @vfrdiv_vf_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv8f16(, , , i32) - define @vfrdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfrdiv_vf_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv16f16(, , , i32) - define @vfrdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f16: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define @vfrdiv_vf_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv32f16(, , , i32) - define @vfrdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv32f16: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define @vfrdiv_vf_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv1f32(, , , i32) - define @vfrdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define @vfrdiv_vf_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv2f32(, , , i32) - define @vfrdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define @vfrdiv_vf_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv4f32(, , , i32) - define @vfrdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f32: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define @vfrdiv_vf_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv8f32(, , , i32) - 
define @vfrdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f32: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define @vfrdiv_vf_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv16f32(, , , i32) - define @vfrdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f32: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define @vfrdiv_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fdiv.nxv1f64(, , , i32) - define @vfrdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f64: ; CHECK: # %bb.0: @@ -316,8 +292,6 @@ define @vfrdiv_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv2f64(, , , i32) - define @vfrdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f64: ; CHECK: # %bb.0: @@ -342,8 +316,6 @@ define @vfrdiv_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv4f64(, , , i32) - define @vfrdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f64: ; CHECK: # %bb.0: @@ -368,8 +340,6 @@ define @vfrdiv_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv8f64(, , , i32) - define @vfrdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll index 6a2a6a4c4a0cd..e36786e423193 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrdiv.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 
@@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll index 1211415ffe432..ed0290cfeac2c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16: ; CHECK: 
# %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv32bf16( - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll index e185e8d568701..4d6efc7ca56ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll index f1ed95512741c..2da41ac1e3a91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - , - , - , - iXLen); - define 
@intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll 
index 5dfa5a1f2b20e..d2b5f4a495104 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - , - , - , - iXLen); 
- define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll index a85850b0c4504..81e5c96b6299f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredosum.nxv2f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll index b3101450493e8..22d44f8341743 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredusum.nxv4f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -528,13 
+392,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll index 4626b865ab454..4ba0e4e3dcd63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv32bf16( - , - , - iXLen); - define 
@intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll index db303dddc328e..bfaca190a45e4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv32f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 
@@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsqrt7.mask.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll index 54a6d48cfcc5b..ecd54fb090c14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsub.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll index e2864ea30ec7b..2fc77437019e3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fsub.nxv1f16(, , , i32) - define @vfrsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define @vfrsub_vf_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv2f16(, , , i32) - define @vfrsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define @vfrsub_vf_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv4f16(, , , i32) - define @vfrsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define @vfrsub_vf_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv8f16(, , , 
i32) - define @vfrsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfrsub_vf_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv16f16(, , , i32) - define @vfrsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f16: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define @vfrsub_vf_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv32f16(, , , i32) - define @vfrsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv32f16: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define @vfrsub_vf_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv1f32(, , , i32) - define @vfrsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define @vfrsub_vf_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv2f32(, , , i32) - define @vfrsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define @vfrsub_vf_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv4f32(, , , i32) - define @vfrsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f32: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define @vfrsub_vf_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv8f32(, , , i32) - define @vfrsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f32: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define @vfrsub_vf_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv16f32(, , , i32) - define @vfrsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f32: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define @vfrsub_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fsub.nxv1f64(, , , i32) - define @vfrsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f64: ; CHECK: # %bb.0: @@ -316,8 +292,6 @@ 
define @vfrsub_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv2f64(, , , i32) - define @vfrsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f64: ; CHECK: # %bb.0: @@ -342,8 +316,6 @@ define @vfrsub_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv4f64(, , , i32) - define @vfrsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f64: ; CHECK: # %bb.0: @@ -368,8 +340,6 @@ define @vfrsub_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv8f64(, , , i32) - define @vfrsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll index c701016b7f772..d09027f34c080 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfrsub.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll index 2cd698d9aaa3c..605cb959134d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; 
CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16( 
%0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll index 242a826055c58..8949289ab68f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnj.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # 
%entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnj.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll index 08340becc9ed4..4d43b93e4bfd7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( 
- , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( - , - , - 
bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll index 25b99a1763e49..edd1bb65437cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ 
-334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnjn.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f64.f64( - , - , - double, - iXLen); - 
define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll index e51a42e2b8cea..b31a3290477ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define 
@intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16: 
; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll index cc4c253a8b164..08a5b1f33a910 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ 
-334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnjx.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f64.f64( - , - , - double, - iXLen); - 
define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll index c65719c3a4c1a..43cf64cdcf6f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1down.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 
+103,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll index e05e2160a377a..897004a0a806c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1down.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define 
@intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfslide1down.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( 
%0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll index 57a48986fdfcd..860082bac85c5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s 
| llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1up.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfslide1up.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll index 
642b9dec459e3..02caf5014a180 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1up.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f32.f32( - , - , 
- float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; 
CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -603,14 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - 
define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -628,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -651,14 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -676,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -699,14 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll index eeb5f3bc984d3..eeec0a75a2eb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @vfsqrt_nxv1bf16( %v) strictfp ret %r } - define @vfsqrt_nxv2bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2bf16: ; CHECK: # %bb.0: @@ -41,7 +40,6 @@ define @vfsqrt_nxv2bf16( %v) strictfp ret %r } - define @vfsqrt_nxv4bf16( %v) strictfp { ; CHECK-LABEL: 
vfsqrt_nxv4bf16: ; CHECK: # %bb.0: @@ -56,7 +54,6 @@ define @vfsqrt_nxv4bf16( %v) strictfp ret %r } - define @vfsqrt_nxv8bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8bf16: ; CHECK: # %bb.0: @@ -71,7 +68,6 @@ define @vfsqrt_nxv8bf16( %v) strictfp ret %r } - define @vfsqrt_nxv16bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv16bf16: ; CHECK: # %bb.0: @@ -86,7 +82,6 @@ define @vfsqrt_nxv16bf16( %v) stric ret %r } - define @vfsqrt_nxv32bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv32bf16: ; CHECK: # %bb.0: @@ -106,8 +101,6 @@ define @vfsqrt_nxv32bf16( %v) stric ret %r } -declare @llvm.experimental.constrained.sqrt.nxv1f16(, metadata, metadata) - define @vfsqrt_nxv1f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv1f16: ; ZVFH: # %bb.0: @@ -128,8 +121,6 @@ define @vfsqrt_nxv1f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f16(, metadata, metadata) - define @vfsqrt_nxv2f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv2f16: ; ZVFH: # %bb.0: @@ -150,8 +141,6 @@ define @vfsqrt_nxv2f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f16(, metadata, metadata) - define @vfsqrt_nxv4f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv4f16: ; ZVFH: # %bb.0: @@ -172,8 +161,6 @@ define @vfsqrt_nxv4f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f16(, metadata, metadata) - define @vfsqrt_nxv8f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv8f16: ; ZVFH: # %bb.0: @@ -194,8 +181,6 @@ define @vfsqrt_nxv8f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv16f16(, metadata, metadata) - define @vfsqrt_nxv16f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv16f16: ; ZVFH: # %bb.0: @@ -216,8 +201,6 @@ define @vfsqrt_nxv16f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv32f16(, metadata, metadata) - define @vfsqrt_nxv32f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv32f16: ; ZVFH: # %bb.0: @@ -243,8 +226,6 @@ define @vfsqrt_nxv32f16( %v) strictfp { ret %r } -declare 
@llvm.experimental.constrained.sqrt.nxv1f32(, metadata, metadata) - define @vfsqrt_nxv1f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv1f32: ; CHECK: # %bb.0: @@ -255,8 +236,6 @@ define @vfsqrt_nxv1f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f32(, metadata, metadata) - define @vfsqrt_nxv2f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2f32: ; CHECK: # %bb.0: @@ -267,8 +246,6 @@ define @vfsqrt_nxv2f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f32(, metadata, metadata) - define @vfsqrt_nxv4f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv4f32: ; CHECK: # %bb.0: @@ -279,8 +256,6 @@ define @vfsqrt_nxv4f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f32(, metadata, metadata) - define @vfsqrt_nxv8f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8f32: ; CHECK: # %bb.0: @@ -291,8 +266,6 @@ define @vfsqrt_nxv8f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv16f32(, metadata, metadata) - define @vfsqrt_nxv16f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv16f32: ; CHECK: # %bb.0: @@ -303,8 +276,6 @@ define @vfsqrt_nxv16f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv1f64(, metadata, metadata) - define @vfsqrt_nxv1f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv1f64: ; CHECK: # %bb.0: @@ -315,8 +286,6 @@ define @vfsqrt_nxv1f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f64(, metadata, metadata) - define @vfsqrt_nxv2f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2f64: ; CHECK: # %bb.0: @@ -327,8 +296,6 @@ define @vfsqrt_nxv2f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f64(, metadata, metadata) - define @vfsqrt_nxv4f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv4f64: ; CHECK: # %bb.0: @@ -339,8 +306,6 @@ define @vfsqrt_nxv4f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f64(, metadata, metadata) - define @vfsqrt_nxv8f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8f64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll index 6d7662db2b157..114842b4ef87e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll @@ -101,8 +101,6 @@ define @vfsqrt_nxv32bf16( %v) { ret %r } -declare @llvm.sqrt.nxv1f16() - define @vfsqrt_nxv1f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv1f16: ; ZVFH: # %bb.0: @@ -123,8 +121,6 @@ define @vfsqrt_nxv1f16( %v) { ret %r } -declare @llvm.sqrt.nxv2f16() - define @vfsqrt_nxv2f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv2f16: ; ZVFH: # %bb.0: @@ -145,8 +141,6 @@ define @vfsqrt_nxv2f16( %v) { ret %r } -declare @llvm.sqrt.nxv4f16() - define @vfsqrt_nxv4f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv4f16: ; ZVFH: # %bb.0: @@ -167,8 +161,6 @@ define @vfsqrt_nxv4f16( %v) { ret %r } -declare @llvm.sqrt.nxv8f16() - define @vfsqrt_nxv8f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv8f16: ; ZVFH: # %bb.0: @@ -189,8 +181,6 @@ define @vfsqrt_nxv8f16( %v) { ret %r } -declare @llvm.sqrt.nxv16f16() - define @vfsqrt_nxv16f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv16f16: ; ZVFH: # %bb.0: @@ -211,8 +201,6 @@ define @vfsqrt_nxv16f16( %v) { ret %r } -declare @llvm.sqrt.nxv32f16() - define @vfsqrt_nxv32f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv32f16: ; ZVFH: # %bb.0: @@ -238,8 +226,6 @@ define @vfsqrt_nxv32f16( %v) { ret %r } -declare @llvm.sqrt.nxv1f32() - define @vfsqrt_nxv1f32( %v) { ; CHECK-LABEL: vfsqrt_nxv1f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define @vfsqrt_nxv1f32( %v) { ret %r } -declare @llvm.sqrt.nxv2f32() - define @vfsqrt_nxv2f32( %v) { ; CHECK-LABEL: vfsqrt_nxv2f32: ; CHECK: # %bb.0: @@ -262,8 +246,6 @@ define @vfsqrt_nxv2f32( %v) { ret %r } -declare @llvm.sqrt.nxv4f32() - define @vfsqrt_nxv4f32( %v) { ; CHECK-LABEL: vfsqrt_nxv4f32: ; CHECK: # %bb.0: @@ -274,8 +256,6 @@ define @vfsqrt_nxv4f32( %v) { ret %r } -declare @llvm.sqrt.nxv8f32() - define @vfsqrt_nxv8f32( %v) { ; CHECK-LABEL: vfsqrt_nxv8f32: ; CHECK: # %bb.0: @@ -286,8 +266,6 @@ define 
@vfsqrt_nxv8f32( %v) { ret %r } -declare @llvm.sqrt.nxv16f32() - define @vfsqrt_nxv16f32( %v) { ; CHECK-LABEL: vfsqrt_nxv16f32: ; CHECK: # %bb.0: @@ -298,8 +276,6 @@ define @vfsqrt_nxv16f32( %v) { ret %r } -declare @llvm.sqrt.nxv1f64() - define @vfsqrt_nxv1f64( %v) { ; CHECK-LABEL: vfsqrt_nxv1f64: ; CHECK: # %bb.0: @@ -310,8 +286,6 @@ define @vfsqrt_nxv1f64( %v) { ret %r } -declare @llvm.sqrt.nxv2f64() - define @vfsqrt_nxv2f64( %v) { ; CHECK-LABEL: vfsqrt_nxv2f64: ; CHECK: # %bb.0: @@ -322,8 +296,6 @@ define @vfsqrt_nxv2f64( %v) { ret %r } -declare @llvm.sqrt.nxv4f64() - define @vfsqrt_nxv4f64( %v) { ; CHECK-LABEL: vfsqrt_nxv4f64: ; CHECK: # %bb.0: @@ -334,8 +306,6 @@ define @vfsqrt_nxv4f64( %v) { ret %r } -declare @llvm.sqrt.nxv8f64() - define @vfsqrt_nxv8f64( %v) { ; CHECK-LABEL: vfsqrt_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll index 4336b27eb134a..451b13edb794e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.sqrt.nxv1bf16(, , i32) - define @vfsqrt_vv_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -42,8 +40,6 @@ define @vfsqrt_vv_nxv1bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv2bf16(, , i32) - define @vfsqrt_vv_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @vfsqrt_vv_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv4bf16(, , i32) - define @vfsqrt_vv_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -102,8 +96,6 @@ define @vfsqrt_vv_nxv4bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv8bf16(, , i32) - define @vfsqrt_vv_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -132,8 
+124,6 @@ define @vfsqrt_vv_nxv8bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv16bf16(, , i32) - define @vfsqrt_vv_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -162,8 +152,6 @@ define @vfsqrt_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.sqrt.nxv32bf16(, , i32) - define @vfsqrt_vv_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -233,7 +221,6 @@ define @vfsqrt_vv_nxv32bf16_unmasked( @llvm.vp.sqrt.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.sqrt.nxv1f16(, , i32) define @vfsqrt_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv1f16: @@ -275,8 +262,6 @@ define @vfsqrt_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv2f16(, , i32) - define @vfsqrt_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -317,8 +302,6 @@ define @vfsqrt_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv4f16(, , i32) - define @vfsqrt_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -359,8 +342,6 @@ define @vfsqrt_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv8f16(, , i32) - define @vfsqrt_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -401,8 +382,6 @@ define @vfsqrt_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv16f16(, , i32) - define @vfsqrt_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -443,8 +422,6 @@ define @vfsqrt_vv_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.sqrt.nxv32f16(, , i32) - define @vfsqrt_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -527,8 +504,6 @@ define @vfsqrt_vv_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.sqrt.nxv1f32(, , i32) - define @vfsqrt_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f32: ; CHECK: # %bb.0: 
@@ -549,8 +524,6 @@ define @vfsqrt_vv_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv2f32(, , i32) - define @vfsqrt_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f32: ; CHECK: # %bb.0: @@ -571,8 +544,6 @@ define @vfsqrt_vv_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv4f32(, , i32) - define @vfsqrt_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f32: ; CHECK: # %bb.0: @@ -593,8 +564,6 @@ define @vfsqrt_vv_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv8f32(, , i32) - define @vfsqrt_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f32: ; CHECK: # %bb.0: @@ -615,8 +584,6 @@ define @vfsqrt_vv_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv16f32(, , i32) - define @vfsqrt_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f32: ; CHECK: # %bb.0: @@ -637,8 +604,6 @@ define @vfsqrt_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv1f64(, , i32) - define @vfsqrt_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f64: ; CHECK: # %bb.0: @@ -659,8 +624,6 @@ define @vfsqrt_vv_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv2f64(, , i32) - define @vfsqrt_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f64: ; CHECK: # %bb.0: @@ -681,8 +644,6 @@ define @vfsqrt_vv_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv4f64(, , i32) - define @vfsqrt_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f64: ; CHECK: # %bb.0: @@ -703,8 +664,6 @@ define @vfsqrt_vv_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv7f64(, , i32) - define @vfsqrt_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv7f64: ; CHECK: # %bb.0: @@ -725,8 +684,6 @@ define @vfsqrt_vv_nxv7f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv8f64(, , i32) - define @vfsqrt_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f64: ; CHECK: # %bb.0: @@ -748,7 +705,6 @@ define 
@vfsqrt_vv_nxv8f64_unmasked( % } ; Test splitting. -declare @llvm.vp.sqrt.nxv16f64(, , i32) define @vfsqrt_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll index 6e495afe25639..c8d5060a38065 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsqrt.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfsqrt.mask.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfsqrt.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll index aea75211b70b5..b568c19de0edd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -281,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -303,13 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll index cd8f890251c77..d50b8c3c0e81a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll @@ -286,7 +286,6 @@ define @vfsub_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f16(, , metadata, metadata) define @vfsub_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -334,7 +333,6 @@ define @vfsub_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f16(, , metadata, metadata) define @vfsub_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -382,7 +380,6 @@ define @vfsub_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f16(, , metadata, metadata) define @vfsub_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -430,7 +427,6 @@ define @vfsub_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f16(, , metadata, metadata) define @vfsub_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -503,7 +499,6 @@ define @vfsub_fv_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv16f16(, , metadata, metadata) define @vfsub_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -551,7 +546,6 @@ define @vfsub_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv32f16(, , metadata, metadata) define 
@vfsub_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -644,7 +638,6 @@ define @vfsub_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f32(, , metadata, metadata) define @vfsub_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -668,7 +661,6 @@ define @vfsub_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f32(, , metadata, metadata) define @vfsub_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -692,7 +684,6 @@ define @vfsub_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f32(, , metadata, metadata) define @vfsub_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -716,7 +707,6 @@ define @vfsub_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f32(, , metadata, metadata) define @vfsub_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -752,7 +742,6 @@ define @vfsub_fv_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv16f32(, , metadata, metadata) define @vfsub_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,7 +765,6 @@ define @vfsub_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f64(, , metadata, metadata) define @vfsub_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -800,7 +788,6 @@ define @vfsub_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f64(, , metadata, metadata) define @vfsub_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,7 +811,6 @@ define @vfsub_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f64(, 
, metadata, metadata) define @vfsub_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -848,7 +834,6 @@ define @vfsub_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f64(, , metadata, metadata) define @vfsub_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll index dc0bfbd0f76dd..6637aced3cdac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fsub.nxv1bf16(, , , i32) - define @vfsub_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -82,8 +80,6 @@ define @vfsub_vf_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv2bf16(, , , i32) - define @vfsub_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -152,8 +148,6 @@ define @vfsub_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv4bf16(, , , i32) - define @vfsub_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -222,8 +216,6 @@ define @vfsub_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv8bf16(, , , i32) - define @vfsub_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -292,8 +284,6 @@ define @vfsub_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv16bf16(, , , i32) - define @vfsub_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vfsub_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fsub.nxv32bf16(, , , i32) - define @vfsub_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv32bf16: ; 
CHECK: # %bb.0: @@ -594,7 +582,6 @@ define @vfsub_vf_nxv32bf16_unmasked( @llvm.vp.fsub.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fsub.nxv1f16(, , , i32) define @vfsub_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv1f16: @@ -688,8 +675,6 @@ define @vfsub_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv2f16(, , , i32) - define @vfsub_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -782,8 +767,6 @@ define @vfsub_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv4f16(, , , i32) - define @vfsub_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -876,8 +859,6 @@ define @vfsub_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv8f16(, , , i32) - define @vfsub_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -970,8 +951,6 @@ define @vfsub_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv16f16(, , , i32) - define @vfsub_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1064,8 +1043,6 @@ define @vfsub_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv32f16(, , , i32) - define @vfsub_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1321,8 +1298,6 @@ define @vfsub_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv1f32(, , , i32) - define @vfsub_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1367,8 +1342,6 @@ define @vfsub_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv2f32(, , , i32) - define @vfsub_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1413,8 +1386,6 @@ define @vfsub_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv4f32(, , , i32) - define @vfsub_vv_nxv4f32( %va, %b, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1459,8 +1430,6 @@ define @vfsub_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv8f32(, , , i32) - define @vfsub_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1505,8 +1474,6 @@ define @vfsub_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv16f32(, , , i32) - define @vfsub_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1551,8 +1518,6 @@ define @vfsub_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv1f64(, , , i32) - define @vfsub_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1597,8 +1562,6 @@ define @vfsub_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv2f64(, , , i32) - define @vfsub_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1643,8 +1606,6 @@ define @vfsub_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv4f64(, , , i32) - define @vfsub_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1689,8 +1650,6 @@ define @vfsub_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv7f64(, , , i32) - define @vfsub_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1701,8 +1660,6 @@ define @vfsub_vv_nxv7f64( %va, %v } -declare @llvm.vp.fsub.nxv8f64(, , , i32) - define @vfsub_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll index 41ebfc50ed475..70b6b58e28844 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
-declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: 
# %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -281,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -303,13 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -512,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -558,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: ; 
CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -697,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -719,13 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -765,13 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -789,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -811,13 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1019,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,13 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1065,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +782,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1111,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1133,13 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1179,13 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1203,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1225,13 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1249,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1271,13 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1295,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1317,13 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1341,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1363,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll index 62feac824efad..14edc5a57effc 100644 
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ 
-313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, 
iXLen); - define @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll index f7297927db717..2437b127d594a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll @@ -92,6 +92,3 @@ bb: ret %tmp4 } -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) -declare @llvm.vp.fadd.nxv2f32(, , , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll index c5417e826bf41..2d130f9e6c2ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( - , - , - 
, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index 5b541562978b8..4ca4a4d451bd1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ 
-184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; 
CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # 
%entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll index 5e92ab1e290e2..7319301b82f91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - , - , - , - iXLen, 
iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: 
# %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -456,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -480,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -506,12 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -530,13 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # 
%entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, 
float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -780,13 +579,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -806,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -830,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -856,12 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry @@ -880,13 +653,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll index b7df45bad36e6..01344dcd3bb77 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8: ; 
CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll index c370261a77bc0..9f513969feae6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { 
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll index 354cf37591a19..b51c8efca9f7c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll @@ -7,10 +7,6 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - , - iXLen); define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: @@ -28,12 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -71,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -93,11 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -114,12 +88,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +104,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,12 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -179,11 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -200,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -222,11 +168,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -243,12 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -265,11 +200,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -286,12 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -308,11 +232,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -329,12 +248,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -351,11 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll index 317ebe3e558db..ebb1a74a806a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -283,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -305,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -369,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -412,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -455,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -541,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -584,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -627,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll index 59800f9672289..41dfcadcef017 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - , - , - , 
- iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - , - , - 
iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -283,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -305,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -369,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -412,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -455,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -541,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -584,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -627,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll index c33af8df6f1c6..d12e839856beb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a 
} -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry 
@@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll index a6a8b99f24217..e216cb9f601fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - , - 
, - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ 
-224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll index 90a93116bd634..409bc28659b6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll index d7b1d97d059c1..c513ae4a95102 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen 
%1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll index 6e11a222c713c..349d289339df2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare 
@llvm.riscv.vfwcvtbf16.f.f.v.nxv1f32.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv1f32_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv2f32.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv2f32_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv4f32.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv4f32_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv8f32.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv8f32_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry 
@@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv16f32.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv16f32_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll index 63113b8780989..eb5078c6f0862 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.fma.v1f32(, , ) - define @vfwmacc_vv_nxv1f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -243,8 +241,6 @@ define @vfwnmsac_fv_nxv1f32( %va, %vg } -declare @llvm.fma.v2f32(, , ) - define @vfwmacc_vv_nxv2f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -478,9 +474,6 @@ define @vfwnmsac_fv_nxv2f32( %va, %vg } - -declare @llvm.fma.v4f32(, , ) - define @vfwmacc_vv_nxv4f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -714,8 +707,6 @@ define @vfwnmsac_fv_nxv4f32( %va, %vg } -declare @llvm.fma.v8f32(, , ) - define @vfwmacc_vv_nxv8f32( %va, %vb, %vc) { ; 
ZVFH-LABEL: vfwmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -949,8 +940,6 @@ define @vfwnmsac_fv_nxv8f32( %va, %vg } -declare @llvm.fma.v16f32(, , ) - define @vfwmacc_vv_nxv16f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -1184,8 +1173,6 @@ define @vfwnmsac_fv_nxv16f32( %va, %vg } -declare @llvm.fma.v1f64(, , ) - define @vfwmacc_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1329,8 +1316,6 @@ define @vfwnmsac_fv_nxv1f64( %va, %vg } -declare @llvm.fma.v2f64(, , ) - define @vfwmacc_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1474,9 +1459,6 @@ define @vfwnmsac_fv_nxv2f64( %va, %vg } - -declare @llvm.fma.v4f64(, , ) - define @vfwmacc_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1620,8 +1602,6 @@ define @vfwnmsac_fv_nxv4f64( %va, %vg } -declare @llvm.fma.v8f64(, , ) - define @vfwmacc_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll index 4ef7ea5b52a75..2e8e05fd8ce4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vfmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -295,12 +290,6 @@ define @vfmacc_vf_nxv1f32_unmasked_tu( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -397,12 +386,6 @@ define @vfmacc_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -501,12 +484,6 @@ define @vfmacc_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -605,12 +582,6 @@ define @vfmacc_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -709,12 +680,6 @@ define @vfmacc_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -771,12 +736,6 @@ define @vfmacc_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare 
@llvm.vp.select.nxv2f64(, , , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -833,12 +792,6 @@ define @vfmacc_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -895,12 +848,6 @@ define @vfmacc_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64: ; CHECK: # %bb.0: @@ -957,8 +904,6 @@ define @vfmacc_vf_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.fpext.nxv1f64.nxv1f16(, , i32) - define @vfmacc_vv_nxv1f64_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -991,8 +936,6 @@ define @vfmacc_vv_nxv1f64_nxv1f16_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfmacc_vv_nxv2f64_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -1025,8 +968,6 @@ define @vfmacc_vv_nxv2f64_nxv2f16_unmasked( %v } -declare @llvm.vp.fpext.nxv4f64.nxv4f16(, , i32) - define @vfmacc_vv_nxv4f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -1059,8 +1000,6 @@ define @vfmacc_vv_nxv4f64_nxv4f16_unmasked( %v } -declare @llvm.vp.fpext.nxv8f64.nxv8f16(, , i32) - define @vfmacc_vv_nxv8f64_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64_nxv8f16: 
; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll index 354f169561735..aad4b8b19884a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmacc.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmacc.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll index 965c3d2c5d715..5dfbc8aa04de2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfbfmin,+zvfbfwma \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmaccbf16.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll index a3f667818ab0a..0425399c43ac8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll index 36f8e99b27383..68b803fd53721 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vmfsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -191,10 +186,6 @@ define @vmfsac_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vmfsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -323,10 +314,6 @@ define @vmfsac_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vmfsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -457,10 +444,6 @@ define @vmfsac_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vmfsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv8f32: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll index bd0d616fa6176..491e57475cf21 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwmsac.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmsac.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll index 577b93af7a918..858ba09f47b14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); 
- define @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll index f3a2bbd2ea140..975032b88568a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, 
iXLen); - define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: 
; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - , 
- , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll index 1e05e4c7acf25..152e7041a466f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # 
%bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll index fa328356ab585..2b540d775e500 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) - define @vfnmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -146,10 +142,6 @@ define @vfnmacc_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfnmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -284,10 +276,6 @@ define @vfnmacc_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfnmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -424,10 +412,6 @@ define @vfnmacc_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfnmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -564,10 +548,6 @@ define @vfnmacc_vf_nxv8f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfnmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -704,10 +684,6 @@ define @vfnmacc_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfnmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -790,10 +766,6 @@ define @vfnmacc_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfnmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -876,10 +848,6 @@ define @vfnmacc_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfnmacc_vv_nxv4f64( %a, 
%b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -962,10 +930,6 @@ define @vfnmacc_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfnmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1048,8 +1012,6 @@ define @vfnmacc_vf_nxv8f64_unmasked( % ret %v } -declare @llvm.vp.fpext.nxv1f64.nxv1f16(, , i32) - define @vfnmacc_vv_nxv1f64_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -1086,8 +1048,6 @@ define @vfnmacc_vv_nxv1f64_nxv1f16_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfnmacc_vv_nxv2f64_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -1124,8 +1084,6 @@ define @vfnmacc_vv_nxv2f64_nxv2f16_unmasked( %v } -declare @llvm.vp.fpext.nxv4f64.nxv4f16(, , i32) - define @vfnmacc_vv_nxv4f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -1162,8 +1120,6 @@ define @vfnmacc_vv_nxv4f64_nxv4f16_unmasked( %v } -declare @llvm.vp.fpext.nxv8f64.nxv8f16(, , i32) - define @vfnmacc_vv_nxv8f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64_nxv4f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll index e1db8cb722760..2b7362cbc2f4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, 
float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll index 223ad4f7483f6..c7f7b7102a500 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll index cc0ae35780a60..ef00edb5a5a53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) - define @vfnmsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -140,10 +136,6 @@ define @vfnmsac_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfnmsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -272,10 +264,6 @@ define @vfnmsac_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfnmsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -406,10 +394,6 @@ define @vfnmsac_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfnmsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -540,10 +524,6 @@ define @vfnmsac_vf_nxv8f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfnmsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -674,10 +654,6 @@ define @vfnmsac_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfnmsac_vv_nxv1f64( %a, %b, %c, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -754,10 +730,6 @@ define @vfnmsac_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfnmsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -834,10 +806,6 @@ define @vfnmsac_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfnmsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -914,10 +882,6 @@ define @vfnmsac_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfnmsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll index 5c62112aa9e3d..7bebb0084fcbf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry 
@@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, 
half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 
+633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll index bbb019f2f5892..014f07c125391 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll index 05044ef689a92..fa3303de780b7 
100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll index d993e4e610d2c..3341669551fae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ 
-210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( - , - , - 
bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll index b22899a100e4a..c1a295a36bcc2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # 
%bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.bf16( - , - , - bfloat, - iXLen, iXLen); - 
define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll index 9a96364bf07d5..38878bee6999a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll index 7facc0ad483d7..d7af8f2a2746d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -456,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -480,13 +357,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -506,12 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -530,13 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -780,13 +579,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -806,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f64.f32( - 
, - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -830,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -856,12 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry @@ -880,13 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll index 291d505d8faaf..3a4b1bfab6a31 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vghsh.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll index 4498f71008704..240d6db9beef1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vgmul.vv.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vid.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll index 1a2ed18730ab7..f743f1eb2ff82 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vid.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vid.nxv1i8( - , - iXLen); - define @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -22,11 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -42,10 +33,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i8( - , - iXLen); - define @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -60,11 +47,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -80,10 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i8( - , - iXLen); - define @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,10 +91,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.nxv8i8( - , - iXLen); - define @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -136,11 +105,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -156,10 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i8( - , - iXLen); - define @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -174,11 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -194,10 +149,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv32i8( - , - iXLen); - define @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -212,11 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv32i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -232,10 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i16( - , - iXLen); - define @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -250,11 +192,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -270,10 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i16( - , - iXLen); - define @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -288,11 +221,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.mask.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -308,10 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i16( - , - iXLen); - define @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,11 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -346,10 +265,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i16( - , - iXLen); - define @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -364,11 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -384,10 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i16( - , - iXLen); - define @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -402,11 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -422,10 +323,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv32i16( - , - iXLen); - define @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -440,11 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv32i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16: ; CHECK: # %bb.0: # 
%entry @@ -460,10 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i32( - , - iXLen); - define @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -478,11 +366,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,10 +381,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i32( - , - iXLen); - define @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -516,11 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -536,10 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i32( - , - iXLen); - define @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -554,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -574,10 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i32( - , - iXLen); - define @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -592,11 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -612,10 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i32( - , - iXLen); - define @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i32: ; CHECK: # 
%bb.0: # %entry @@ -630,11 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -650,10 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i64( - , - iXLen); - define @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -668,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -688,10 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i64( - , - iXLen); - define @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -706,11 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -726,10 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i64( - , - iXLen); - define @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -744,11 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -764,10 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i64( - , - iXLen); - define @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -782,11 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vid_mask_v_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/viota.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll index a60aca3c4f065..16eda451735e9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/viota.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.viota.nxv1i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -66,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -108,12 +81,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, iXLen %1) nounwind { 
; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -150,12 +112,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -192,12 +143,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv32i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv64i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -276,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -298,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -318,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -340,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -360,12 +267,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -382,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -402,12 +298,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -424,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -444,12 +329,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -466,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i16( - , - , - iXLen); - define 
@intrinsic_viota_m_nxv16i16_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -486,12 +360,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -508,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv32i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -528,12 +391,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -550,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -570,12 +422,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -592,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -612,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -634,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i32( 
- , - , - iXLen); - define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -654,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -676,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -696,12 +515,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -718,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -738,12 +546,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -760,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -780,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -802,11 +593,6 @@ entry: ret %a } -declare 
@llvm.riscv.viota.nxv2i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -822,12 +608,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -844,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -864,12 +639,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -886,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -906,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll index 599a66d191fd2..b93a4c166538a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(, metadata, metadata) define 
@vsitofp_nxv1i1_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define @vsitofp_nxv1i1_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define @vuitofp_nxv1i1_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(, metadata, metadata) define @vsitofp_nxv1i1_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: @@ -43,7 +40,6 @@ define @vsitofp_nxv1i1_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: @@ -56,7 +52,6 @@ define @vuitofp_nxv1i1_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(, metadata, metadata) define @vsitofp_nxv1i1_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: @@ -69,7 +64,6 @@ define @vsitofp_nxv1i1_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define @vuitofp_nxv1i1_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: @@ -95,7 +88,6 @@ define @vsitofp_nxv2i1_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: @@ -108,7 +100,6 @@ define @vuitofp_nxv2i1_nxv2f16( %va) strict 
ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -121,7 +112,6 @@ define @vsitofp_nxv2i1_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -134,7 +124,6 @@ define @vuitofp_nxv2i1_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: @@ -147,7 +136,6 @@ define @vsitofp_nxv2i1_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: @@ -160,7 +148,6 @@ define @vuitofp_nxv2i1_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: @@ -173,7 +160,6 @@ define @vsitofp_nxv4i1_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: @@ -186,7 +172,6 @@ define @vuitofp_nxv4i1_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: @@ -199,7 +184,6 @@ define @vsitofp_nxv4i1_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f32( %va) strictfp { ; 
CHECK-LABEL: vuitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: @@ -212,7 +196,6 @@ define @vuitofp_nxv4i1_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define @vsitofp_nxv4i1_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: @@ -238,7 +220,6 @@ define @vuitofp_nxv4i1_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: @@ -251,7 +232,6 @@ define @vsitofp_nxv8i1_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: @@ -264,7 +244,6 @@ define @vuitofp_nxv8i1_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: @@ -277,7 +256,6 @@ define @vsitofp_nxv8i1_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: @@ -290,7 +268,6 @@ define @vuitofp_nxv8i1_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: @@ -303,7 +280,6 @@ define @vsitofp_nxv8i1_nxv8f64( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: @@ -316,7 +292,6 @@ define @vuitofp_nxv8i1_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(, metadata, metadata) define @vsitofp_nxv16i1_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: @@ -329,7 +304,6 @@ define @vsitofp_nxv16i1_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(, metadata, metadata) define @vuitofp_nxv16i1_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: @@ -342,7 +316,6 @@ define @vuitofp_nxv16i1_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(, metadata, metadata) define @vsitofp_nxv16i1_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: @@ -355,7 +328,6 @@ define @vsitofp_nxv16i1_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(, metadata, metadata) define @vuitofp_nxv16i1_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: @@ -368,7 +340,6 @@ define @vuitofp_nxv16i1_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(, metadata, metadata) define @vsitofp_nxv32i1_nxv32f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: @@ -381,7 +352,6 @@ define @vsitofp_nxv32i1_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(, metadata, metadata) define @vuitofp_nxv32i1_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: @@ -394,7 +364,6 @@ define @vuitofp_nxv32i1_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f16( %va) strictfp { ; 
CHECK-LABEL: vsitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: @@ -406,7 +375,6 @@ define @vsitofp_nxv1i8_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(, metadata, metadata) define @vsitofp_nxv1i7_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: @@ -419,7 +387,6 @@ define @vsitofp_nxv1i7_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(, metadata, metadata) define @vuitofp_nxv1i7_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: @@ -432,7 +399,6 @@ define @vuitofp_nxv1i7_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: @@ -444,7 +410,6 @@ define @vuitofp_nxv1i8_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: @@ -456,7 +421,6 @@ define @vsitofp_nxv1i8_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: @@ -468,7 +432,6 @@ define @vuitofp_nxv1i8_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: @@ -480,7 +443,6 @@ define @vsitofp_nxv1i8_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: @@ -492,7 +454,6 @@ define @vuitofp_nxv1i8_nxv1f64( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: @@ -504,7 +465,6 @@ define @vsitofp_nxv2i8_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: @@ -516,7 +476,6 @@ define @vuitofp_nxv2i8_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -528,7 +487,6 @@ define @vsitofp_nxv2i8_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -540,7 +498,6 @@ define @vuitofp_nxv2i8_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -552,7 +509,6 @@ define @vsitofp_nxv2i8_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -564,7 +520,6 @@ define @vuitofp_nxv2i8_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: @@ -576,7 +531,6 @@ define @vsitofp_nxv4i8_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f16( %va) strictfp { ; CHECK-LABEL: 
vuitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: @@ -588,7 +542,6 @@ define @vuitofp_nxv4i8_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: @@ -600,7 +553,6 @@ define @vsitofp_nxv4i8_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: @@ -612,7 +564,6 @@ define @vuitofp_nxv4i8_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: @@ -624,7 +575,6 @@ define @vsitofp_nxv4i8_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: @@ -636,7 +586,6 @@ define @vuitofp_nxv4i8_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: @@ -648,7 +597,6 @@ define @vsitofp_nxv8i8_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: @@ -660,7 +608,6 @@ define @vuitofp_nxv8i8_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: @@ -672,7 +619,6 @@ define @vsitofp_nxv8i8_nxv8f32( %va) stric ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: @@ -684,7 +630,6 @@ define @vuitofp_nxv8i8_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: @@ -696,7 +641,6 @@ define @vsitofp_nxv8i8_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: @@ -708,7 +652,6 @@ define @vuitofp_nxv8i8_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(, metadata, metadata) define @vsitofp_nxv16i8_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: @@ -720,7 +663,6 @@ define @vsitofp_nxv16i8_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(, metadata, metadata) define @vuitofp_nxv16i8_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: @@ -732,7 +674,6 @@ define @vuitofp_nxv16i8_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(, metadata, metadata) define @vsitofp_nxv16i8_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: @@ -744,7 +685,6 @@ define @vsitofp_nxv16i8_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(, metadata, metadata) define @vuitofp_nxv16i8_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: @@ -756,7 +696,6 @@ define @vuitofp_nxv16i8_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(, metadata, metadata) define @vsitofp_nxv32i8_nxv32f16( %va) strictfp { ; 
CHECK-LABEL: vsitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: @@ -768,7 +707,6 @@ define @vsitofp_nxv32i8_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(, metadata, metadata) define @vuitofp_nxv32i8_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: @@ -780,7 +718,6 @@ define @vuitofp_nxv32i8_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: @@ -791,7 +728,6 @@ define @vsitofp_nxv1i16_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: @@ -802,7 +738,6 @@ define @vuitofp_nxv1i16_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: @@ -814,7 +749,6 @@ define @vsitofp_nxv1i16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: @@ -826,7 +760,6 @@ define @vuitofp_nxv1i16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: @@ -838,7 +771,6 @@ define @vsitofp_nxv1i16_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: @@ -850,7 +782,6 @@ define @vuitofp_nxv1i16_nxv1f64( %va) st ret %evec } 
-declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: @@ -861,7 +792,6 @@ define @vsitofp_nxv2i16_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: @@ -872,7 +802,6 @@ define @vuitofp_nxv2i16_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -884,7 +813,6 @@ define @vsitofp_nxv2i16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -896,7 +824,6 @@ define @vuitofp_nxv2i16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -908,7 +835,6 @@ define @vsitofp_nxv2i16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -920,7 +846,6 @@ define @vuitofp_nxv2i16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: @@ -931,7 +856,6 @@ define @vsitofp_nxv4i16_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f16( %va) strictfp { ; 
CHECK-LABEL: vuitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: @@ -942,7 +866,6 @@ define @vuitofp_nxv4i16_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: @@ -954,7 +877,6 @@ define @vsitofp_nxv4i16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: @@ -966,7 +888,6 @@ define @vuitofp_nxv4i16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: @@ -978,7 +899,6 @@ define @vsitofp_nxv4i16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: @@ -990,7 +910,6 @@ define @vuitofp_nxv4i16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: @@ -1001,7 +920,6 @@ define @vsitofp_nxv8i16_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: @@ -1012,7 +930,6 @@ define @vuitofp_nxv8i16_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: @@ -1024,7 +941,6 @@ define @vsitofp_nxv8i16_nxv8f32( %va) str ret %evec } 
-declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: @@ -1036,7 +952,6 @@ define @vuitofp_nxv8i16_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: @@ -1048,7 +963,6 @@ define @vsitofp_nxv8i16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: @@ -1060,7 +974,6 @@ define @vuitofp_nxv8i16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(, metadata, metadata) define @vsitofp_nxv16i16_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: @@ -1071,7 +984,6 @@ define @vsitofp_nxv16i16_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(, metadata, metadata) define @vuitofp_nxv16i16_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: @@ -1082,7 +994,6 @@ define @vuitofp_nxv16i16_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(, metadata, metadata) define @vsitofp_nxv16i16_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: @@ -1094,7 +1005,6 @@ define @vsitofp_nxv16i16_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(, metadata, metadata) define @vuitofp_nxv16i16_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: @@ -1106,7 +1016,6 @@ define @vuitofp_nxv16i16_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(, metadata, metadata) define 
@vsitofp_nxv32i16_nxv32f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: @@ -1117,7 +1026,6 @@ define @vsitofp_nxv32i16_nxv32f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(, metadata, metadata) define @vuitofp_nxv32i16_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: @@ -1128,7 +1036,6 @@ define @vuitofp_nxv32i16_nxv32f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: @@ -1140,7 +1047,6 @@ define @vsitofp_nxv1i32_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: @@ -1152,7 +1058,6 @@ define @vuitofp_nxv1i32_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: @@ -1163,7 +1068,6 @@ define @vsitofp_nxv1i32_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: @@ -1174,7 +1078,6 @@ define @vuitofp_nxv1i32_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: @@ -1186,7 +1089,6 @@ define @vsitofp_nxv1i32_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: @@ -1198,7 
+1100,6 @@ define @vuitofp_nxv1i32_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: @@ -1210,7 +1111,6 @@ define @vsitofp_nxv2i32_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: @@ -1222,7 +1122,6 @@ define @vuitofp_nxv2i32_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -1233,7 +1132,6 @@ define @vsitofp_nxv2i32_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -1244,7 +1142,6 @@ define @vuitofp_nxv2i32_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -1256,7 +1153,6 @@ define @vsitofp_nxv2i32_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -1268,7 +1164,6 @@ define @vuitofp_nxv2i32_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: @@ -1280,7 +1175,6 @@ define @vsitofp_nxv4i32_nxv4f16( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: @@ -1292,7 +1186,6 @@ define @vuitofp_nxv4i32_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: @@ -1303,7 +1196,6 @@ define @vsitofp_nxv4i32_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: @@ -1314,7 +1206,6 @@ define @vuitofp_nxv4i32_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: @@ -1326,7 +1217,6 @@ define @vsitofp_nxv4i32_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: @@ -1338,7 +1228,6 @@ define @vuitofp_nxv4i32_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: @@ -1350,7 +1239,6 @@ define @vsitofp_nxv8i32_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: @@ -1362,7 +1250,6 @@ define @vuitofp_nxv8i32_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f32( %va) strictfp 
{ ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: @@ -1373,7 +1260,6 @@ define @vsitofp_nxv8i32_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: @@ -1384,7 +1270,6 @@ define @vuitofp_nxv8i32_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: @@ -1396,7 +1281,6 @@ define @vsitofp_nxv8i32_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: @@ -1408,7 +1292,6 @@ define @vuitofp_nxv8i32_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(, metadata, metadata) define @vsitofp_nxv16i32_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: @@ -1420,7 +1303,6 @@ define @vsitofp_nxv16i32_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(, metadata, metadata) define @vuitofp_nxv16i32_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: @@ -1432,7 +1314,6 @@ define @vuitofp_nxv16i32_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(, metadata, metadata) define @vsitofp_nxv16i32_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: @@ -1443,7 +1324,6 @@ define @vsitofp_nxv16i32_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(, metadata, metadata) define @vuitofp_nxv16i32_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: @@ -1454,7 +1334,6 @@ define 
@vuitofp_nxv16i32_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: @@ -1467,7 +1346,6 @@ define @vsitofp_nxv1i64_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: @@ -1480,7 +1358,6 @@ define @vuitofp_nxv1i64_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: @@ -1492,7 +1369,6 @@ define @vsitofp_nxv1i64_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: @@ -1504,7 +1380,6 @@ define @vuitofp_nxv1i64_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: @@ -1515,7 +1390,6 @@ define @vsitofp_nxv1i64_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: @@ -1526,8 +1400,6 @@ define @vuitofp_nxv1i64_nxv1f64( %va) st ret %evec } - -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: @@ -1540,7 +1412,6 @@ define @vsitofp_nxv2i64_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(, metadata, 
metadata) define @vuitofp_nxv2i64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: @@ -1553,7 +1424,6 @@ define @vuitofp_nxv2i64_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -1565,7 +1435,6 @@ define @vsitofp_nxv2i64_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(, metadata, metadata) define @vuitofp_nxv2i64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -1577,7 +1446,6 @@ define @vuitofp_nxv2i64_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -1588,7 +1456,6 @@ define @vsitofp_nxv2i64_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(, metadata, metadata) define @vuitofp_nxv2i64_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -1599,7 +1466,6 @@ define @vuitofp_nxv2i64_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: @@ -1612,7 +1478,6 @@ define @vsitofp_nxv4i64_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: @@ -1625,7 +1490,6 @@ define @vuitofp_nxv4i64_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: @@ 
-1637,7 +1501,6 @@ define @vsitofp_nxv4i64_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: @@ -1649,7 +1512,6 @@ define @vuitofp_nxv4i64_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: @@ -1660,7 +1522,6 @@ define @vsitofp_nxv4i64_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: @@ -1671,7 +1532,6 @@ define @vuitofp_nxv4i64_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: @@ -1684,7 +1544,6 @@ define @vsitofp_nxv8i64_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: @@ -1697,7 +1556,6 @@ define @vuitofp_nxv8i64_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: @@ -1709,7 +1567,6 @@ define @vsitofp_nxv8i64_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: @@ -1721,7 +1578,6 @@ define @vuitofp_nxv8i64_nxv8f32( %va) str ret %evec } -declare 
@llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: @@ -1732,7 +1588,6 @@ define @vsitofp_nxv8i64_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll index e1f641afd2cfe..9560249972141 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll @@ -2,26 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vadd.nxv4i32.nxv4i32(, , , iXLen) -declare @llvm.riscv.vrgather.vv.nxv4i32.iXLen( - , - , - , - iXLen) - -declare @llvm.riscv.vslidedown.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - -declare @llvm.riscv.vslide1down.nxv4i32.i32( - , - , - i32, - iXLen); - define @vrgather( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: vrgather: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll index b4ebf5444df7c..866e4e7e11ab6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll @@ -27,12 +27,6 @@ entry: ret <2 x i32> %y12 } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %a, %b, iXLen %2, %3, %4, %z) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -53,12 +47,6 @@ entry: ret %x } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @vnclip( %a, 
%b, iXLen %2, %3, %4, %z) nounwind { ; CHECK-LABEL: vnclip: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll index 359601150cb98..ed407dc2161e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll @@ -2,8 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vadd.nxv4i32.nxv4i32(, , , iXLen) - define @different_imm_vl_with_ta( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: different_imm_vl_with_ta: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vle.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll index 0b67d683ed8be..968161dd204ab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vle.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare 
@llvm.riscv.vle.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i16( - , - ptr, - iXLen); - define 
@intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -950,11 +686,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -993,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1013,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1036,11 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1056,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1079,11 +779,6 @@ 
entry: ret %a } -declare @llvm.riscv.vle.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1099,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1122,11 +810,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1142,13 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1165,11 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1185,13 +856,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1208,11 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1228,13 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1294,11 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1314,13 +949,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1337,11 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1357,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1380,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1400,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4bf16( - , - ptr, - , - iXLen, - iXLen); 
- define @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1423,11 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1443,13 +1042,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1466,11 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1486,13 +1073,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1509,11 +1089,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1529,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1552,11 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -1572,13 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1595,11 +1151,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1166,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1638,11 +1182,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1658,13 +1197,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1681,11 +1213,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1701,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1724,11 +1244,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vle_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1744,13 +1259,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1767,11 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1787,13 +1290,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1810,11 +1306,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1321,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll index ca3ed15bc40e4..6b6276b838fba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll @@ -1,12 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s -declare { , i64 } @llvm.riscv.vleff.nxv8i8(, ptr, i64) -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(, ptr, , i64, i64 immarg) - -declare 
{target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) { ; CHECK-LABEL: name: test_vleff_nxv8i8 ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll index 924d16ac4afb6..e20acd725a095 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare { , iXLen } @llvm.riscv.vleff.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -36,13 +31,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -72,11 +60,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -104,13 +87,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -140,11 +116,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i64( - , - 
ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -172,13 +143,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -208,11 +172,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: ; RV32: # %bb.0: # %entry @@ -240,13 +199,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: ; RV32: # %bb.0: # %entry @@ -276,11 +228,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: ; RV32: # %bb.0: # %entry @@ -308,13 +255,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: ; RV32: # %bb.0: # %entry @@ -344,11 +284,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: ; RV32: # %bb.0: # %entry @@ -376,13 +311,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: ; RV32: # %bb.0: # %entry @@ -412,11 +340,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: ; RV32: # %bb.0: # %entry @@ -444,13 +367,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: ; RV32: # %bb.0: # %entry @@ -480,11 +396,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: ; RV32: # %bb.0: # %entry @@ -512,13 +423,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: ; RV32: # %bb.0: # %entry @@ -548,11 +452,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: ; RV32: # %bb.0: # %entry @@ -580,13 +479,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: ; RV32: # %bb.0: # %entry @@ -616,11 +508,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i32( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: ; RV32: # %bb.0: # %entry @@ -648,13 +535,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: ; RV32: # %bb.0: # %entry @@ -684,11 +564,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: ; RV32: # %bb.0: # %entry @@ -716,13 +591,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: ; RV32: # %bb.0: # %entry @@ -752,11 +620,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: ; RV32: # %bb.0: # %entry @@ -784,13 +647,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: ; RV32: # %bb.0: # %entry @@ -820,11 +676,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: ; RV32: # %bb.0: # %entry @@ -852,13 +703,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: ; RV32: # %bb.0: # %entry @@ -888,11 +732,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: ; RV32: # %bb.0: # %entry @@ -920,13 +759,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: ; RV32: # %bb.0: # %entry @@ -956,11 +788,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: ; RV32: # %bb.0: # %entry @@ -988,13 +815,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: ; RV32: # %bb.0: # %entry @@ -1024,11 +844,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: ; RV32: # %bb.0: # %entry @@ -1056,13 +871,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: ; RV32: # %bb.0: # %entry @@ -1092,11 +900,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f32( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: ; RV32: # %bb.0: # %entry @@ -1124,13 +927,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: ; RV32: # %bb.0: # %entry @@ -1160,11 +956,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: ; RV32: # %bb.0: # %entry @@ -1192,13 +983,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: ; RV32: # %bb.0: # %entry @@ -1228,11 +1012,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: ; RV32: # %bb.0: # %entry @@ -1260,13 +1039,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: ; RV32: # %bb.0: # %entry @@ -1296,11 +1068,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: ; RV32: # %bb.0: # %entry @@ -1328,13 +1095,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: ; RV32: # %bb.0: # %entry @@ -1364,11 +1124,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: ; RV32: # %bb.0: # %entry @@ -1396,13 +1151,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: ; RV32: # %bb.0: # %entry @@ -1432,11 +1180,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: ; RV32: # %bb.0: # %entry @@ -1464,13 +1207,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: ; RV32: # %bb.0: # %entry @@ -1500,11 +1236,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: ; RV32: # %bb.0: # %entry @@ -1532,13 +1263,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: ; RV32: # %bb.0: # %entry @@ -1568,11 +1292,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32i16( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: ; RV32: # %bb.0: # %entry @@ -1600,13 +1319,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: ; RV32: # %bb.0: # %entry @@ -1636,11 +1348,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1half_nxv1bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1bf16: ; RV32: # %bb.0: # %entry @@ -1668,13 +1375,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1half_nxv1bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1bf16: ; RV32: # %bb.0: # %entry @@ -1704,11 +1404,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2half_nxv2bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2bf16: ; RV32: # %bb.0: # %entry @@ -1736,13 +1431,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2half_nxv2bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2bf16: ; RV32: # %bb.0: # %entry @@ -1772,11 +1460,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4half_nxv4bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4bf16: ; RV32: # %bb.0: # %entry @@ -1804,13 +1487,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4bf16( - , - ptr, - , - iXLen, - 
iXLen); - define @intrinsic_vleff_mask_v_nxv4half_nxv4bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4bf16: ; RV32: # %bb.0: # %entry @@ -1840,11 +1516,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8half_nxv8bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8bf16: ; RV32: # %bb.0: # %entry @@ -1872,13 +1543,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8half_nxv8bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8bf16: ; RV32: # %bb.0: # %entry @@ -1908,11 +1572,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16half_nxv16bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16bf16: ; RV32: # %bb.0: # %entry @@ -1940,13 +1599,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16half_nxv16bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16bf16: ; RV32: # %bb.0: # %entry @@ -1976,11 +1628,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32half_nxv32bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32bf16: ; RV32: # %bb.0: # %entry @@ -2008,13 +1655,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32half_nxv32bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32bf16: ; RV32: # %bb.0: # %entry @@ -2044,11 +1684,6 @@ entry: ret %b } -declare { , iXLen } 
@llvm.riscv.vleff.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1bfloat_nxv1f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1f16: ; RV32: # %bb.0: # %entry @@ -2076,13 +1711,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16: ; RV32: # %bb.0: # %entry @@ -2112,11 +1740,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2bfloat_nxv2f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2f16: ; RV32: # %bb.0: # %entry @@ -2144,13 +1767,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16: ; RV32: # %bb.0: # %entry @@ -2180,11 +1796,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4bfloat_nxv4f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4f16: ; RV32: # %bb.0: # %entry @@ -2212,13 +1823,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16: ; RV32: # %bb.0: # %entry @@ -2248,11 +1852,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8bfloat_nxv8f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8f16: ; RV32: # %bb.0: # %entry @@ -2280,13 +1879,6 @@ entry: ret %b } -declare { , iXLen 
} @llvm.riscv.vleff.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16: ; RV32: # %bb.0: # %entry @@ -2316,11 +1908,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16bfloat_nxv16f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16f16: ; RV32: # %bb.0: # %entry @@ -2348,13 +1935,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16: ; RV32: # %bb.0: # %entry @@ -2384,11 +1964,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32bfloat_nxv32f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32f16: ; RV32: # %bb.0: # %entry @@ -2416,13 +1991,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16: ; RV32: # %bb.0: # %entry @@ -2452,11 +2020,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: ; RV32: # %bb.0: # %entry @@ -2484,13 +2047,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: ; RV32: # %bb.0: # %entry @@ -2520,11 +2076,6 
@@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: ; RV32: # %bb.0: # %entry @@ -2552,13 +2103,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: ; RV32: # %bb.0: # %entry @@ -2588,11 +2132,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: ; RV32: # %bb.0: # %entry @@ -2620,13 +2159,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: ; RV32: # %bb.0: # %entry @@ -2656,11 +2188,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: ; RV32: # %bb.0: # %entry @@ -2688,13 +2215,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: ; RV32: # %bb.0: # %entry @@ -2724,11 +2244,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: ; RV32: # %bb.0: # %entry @@ -2756,13 +2271,6 @@ entry: ret %b } -declare { , iXLen } 
@llvm.riscv.vleff.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: ; RV32: # %bb.0: # %entry @@ -2792,11 +2300,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: ; RV32: # %bb.0: # %entry @@ -2824,13 +2327,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: ; RV32: # %bb.0: # %entry @@ -2860,11 +2356,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: ; RV32: # %bb.0: # %entry @@ -2892,13 +2383,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll index 7f4b777b06eb0..1cfe1c664acec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vlm.nxv1i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv2i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv2i1(ptr %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv4i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv8i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv16i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv32i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv64i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll index 2df7febfbc18a..d2fba96381bf2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 
+697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1175,14 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1271,14 +901,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1296,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define 
@intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1319,14 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1344,12 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll index be9faa8867a78..d76db678ced7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -790,14 +560,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -815,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -838,14 +594,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -863,12 +611,6 @@ 
entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -886,14 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -911,12 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -934,14 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -959,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -982,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1007,12 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1030,14 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1055,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1078,14 +764,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1103,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1126,14 +798,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1151,12 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1173,14 +831,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - 
iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1198,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1220,14 +864,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1245,12 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1267,14 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1314,14 +930,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ 
-1339,12 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1361,14 +963,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1386,12 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1409,14 +997,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1457,14 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1482,12 +1048,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1505,14 +1065,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1530,12 +1082,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1553,14 +1099,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1116,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1601,14 +1133,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1626,12 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1649,14 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1674,12 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1697,14 +1201,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1722,12 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1745,14 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1770,12 +1252,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1793,14 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1818,12 +1286,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1841,14 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1866,12 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1888,14 +1336,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1913,12 +1353,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1935,14 +1369,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1960,12 +1386,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1982,14 +1402,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2007,12 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2029,14 +1435,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2054,12 +1452,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2076,14 +1468,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2101,12 +1485,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2123,14 +1501,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - 
, - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2148,12 +1518,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2171,14 +1535,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2196,12 +1552,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2219,14 +1569,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2244,12 +1586,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2267,14 +1603,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -2292,12 +1620,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2315,14 +1637,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2340,12 +1654,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2363,14 +1671,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2388,12 +1688,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2411,14 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2436,12 +1722,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2459,14 +1739,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2484,12 +1756,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2507,14 +1773,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,12 +1790,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2555,14 +1807,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1824,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2602,14 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( - 
, - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2627,12 +1857,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2649,14 +1873,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2674,12 +1890,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2696,14 +1906,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2721,12 +1923,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2743,14 +1939,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -2768,12 +1956,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2790,14 +1972,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2815,12 +1989,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2837,14 +2005,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2862,12 +2022,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2885,14 +2039,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2910,12 +2056,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2933,14 +2073,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2958,12 +2090,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2981,14 +2107,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3006,12 +2124,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3029,14 +2141,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3054,12 +2158,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3077,14 +2175,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3102,12 +2192,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3125,14 +2209,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3150,12 +2226,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3173,14 +2243,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3198,12 +2260,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3221,14 +2277,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3246,12 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3269,14 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3294,12 +2328,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3316,14 +2344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3341,12 +2361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3363,14 +2377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3388,12 +2394,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3410,14 +2410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3435,12 +2427,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3457,14 +2443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,12 +2460,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3504,14 +2476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3529,12 +2493,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3551,14 +2509,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3576,12 +2526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3598,14 +2542,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3623,12 +2559,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3646,14 +2576,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3671,12 +2593,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3694,14 +2610,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3719,12 +2627,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3742,14 +2644,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3767,12 +2661,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3790,14 +2678,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3815,12 +2695,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3838,14 +2712,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3863,12 +2729,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3886,14 +2746,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3911,12 +2763,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3934,14 +2780,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3959,12 +2797,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3982,14 +2814,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4007,12 +2831,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4030,14 +2848,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4055,12 +2865,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4078,14 +2882,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4103,12 +2899,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4126,14 +2916,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4151,12 +2933,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4174,14 +2950,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4199,12 +2967,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4222,14 +2984,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,12 +3001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4270,14 +3018,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4295,12 +3035,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4318,14 +3052,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4343,12 +3069,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4366,14 +3086,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4391,12 +3103,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4414,14 +3120,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4439,12 +3137,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4462,14 +3154,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4487,12 +3171,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4510,14 +3188,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4535,12 +3205,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4558,14 +3222,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4583,12 +3239,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4606,14 +3256,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4631,12 +3273,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4654,14 +3290,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4679,12 +3307,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,14 +3324,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4727,12 +3341,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4750,14 +3358,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4775,12 +3375,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4798,14 +3392,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4823,12 +3409,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4846,14 +3426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4871,12 +3443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4894,14 +3460,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4919,12 +3477,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4942,14 +3494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4967,12 +3511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4990,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5015,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5038,14 +3562,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5063,12 +3579,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5086,14 +3596,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5111,12 +3613,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5134,14 +3630,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5159,12 +3647,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5182,14 +3664,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5207,12 +3681,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5230,14 +3698,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5255,12 +3715,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5278,14 +3732,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll index 6d70d191ba8b6..e6b972dd40c79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # 
%bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, 
i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7455,7 +6684,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7482,7 +6710,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7509,7 +6736,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7536,7 +6762,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7563,7 +6788,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7590,7 +6814,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7617,7 +6840,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7644,7 +6866,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7671,7 +6892,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7698,7 +6918,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7725,7 +6944,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7752,7 +6970,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7779,7 +6996,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -7806,7 +7022,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -7833,7 +7048,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -7860,7 +7074,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7887,7 +7100,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7914,7 +7126,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7941,7 +7152,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7178,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7995,7 +7204,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8022,7 +7230,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8049,7 +7256,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8076,7 +7282,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8103,7 +7308,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8130,7 +7334,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8157,7 +7360,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8184,7 +7386,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # 
%bb.0: # %entry @@ -8211,7 +7412,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8238,7 +7438,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8265,7 +7464,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8292,7 +7490,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8319,7 +7516,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8346,7 +7542,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8373,7 +7568,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8400,7 +7594,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8427,7 +7620,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8454,7 +7646,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8481,7 +7672,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8508,7 +7698,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8535,7 +7724,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8562,7 +7750,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8589,7 +7776,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8616,7 +7802,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8643,7 +7828,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8670,7 +7854,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8697,7 +7880,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8724,7 +7906,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7932,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8778,7 +7958,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7984,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8832,7 +8010,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8859,7 +8036,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8886,7 +8062,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8913,7 +8088,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8940,7 +8114,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8967,7 +8140,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8994,7 +8166,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9021,7 +8192,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9048,7 +8218,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9075,7 +8244,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # 
%bb.0: # %entry @@ -9102,7 +8270,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9129,7 +8296,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9156,7 +8322,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9183,7 +8348,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8374,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9237,7 +8400,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9264,7 +8426,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9291,7 +8452,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9318,7 +8478,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9345,7 +8504,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8530,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9399,7 +8556,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8582,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9453,7 +8608,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9480,7 +8634,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9507,7 +8660,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9534,7 +8686,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9561,7 +8712,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9588,7 +8738,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9615,7 +8764,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9642,7 +8790,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9669,7 +8816,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9696,7 +8842,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9723,7 +8868,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9750,7 +8894,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9777,7 +8920,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8946,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8972,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8998,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +9024,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9912,7 +9050,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9939,7 +9076,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9966,7 +9102,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -9993,7 +9128,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10020,7 +9154,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9180,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9206,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9232,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9258,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9284,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9310,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9336,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9362,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9388,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9414,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9440,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10344,7 +9466,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9492,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9518,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9544,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9570,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9596,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9622,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9648,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9674,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10587,7 +9700,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9726,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9752,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9778,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9804,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9830,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9856,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9882,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9908,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9934,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9960,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9986,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +10012,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +10038,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10965,7 +10064,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10992,7 +10090,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11019,7 +10116,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11046,7 +10142,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10168,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10194,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10220,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10246,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10272,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10298,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10324,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10350,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10376,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10402,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10428,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10454,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10480,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10506,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10532,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10558,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10584,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10610,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10636,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10662,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10688,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10714,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10740,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10766,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10792,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: 
; CHECK: # %bb.0: # %entry @@ -11748,7 +10818,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10844,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10870,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10896,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10922,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10948,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10974,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11937,7 +11000,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11964,7 +11026,6 @@ 
entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11991,7 +11052,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12018,7 +11078,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12045,7 +11104,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12072,7 +11130,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11156,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11182,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11208,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11234,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11260,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11286,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11312,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11338,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11364,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11390,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11416,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11442,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11468,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11494,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11520,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11546,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11572,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11598,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11624,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11650,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11676,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11702,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11728,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11754,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11780,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11806,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11832,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11858,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11884,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11910,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11936,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11962,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11988,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12990,7 +12014,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13017,7 +12040,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13044,7 +12066,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13071,7 +12092,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13098,7 +12118,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13125,7 +12144,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12170,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12196,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12222,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12248,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12274,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12300,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12326,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12352,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12378,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12404,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12430,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12456,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12482,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12508,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12534,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12560,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12586,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12612,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12638,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12664,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12690,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12716,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12742,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12768,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12794,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12820,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll index 8f85eb5638255..dcd7ca608f672 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; 
CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, 
i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7455,9 +6684,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7484,9 +6710,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7513,9 +6736,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7542,9 +6762,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7571,9 +6788,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7600,9 +6814,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7629,9 +6840,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7658,9 +6866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7687,9 +6892,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7716,9 +6918,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7745,9 +6944,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7774,9 +6970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7803,9 +6996,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7832,9 +7022,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7861,9 +7048,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7890,9 +7074,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7919,9 +7100,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7948,9 +7126,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7977,9 +7152,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8006,9 +7178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8035,9 +7204,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8064,9 +7230,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8093,9 +7256,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8122,9 +7282,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8151,9 +7308,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8180,9 +7334,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8209,9 +7360,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8238,9 +7386,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8267,9 +7412,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8296,9 +7438,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8325,9 +7464,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8354,9 +7490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8383,9 +7516,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8412,9 +7542,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8441,9 +7568,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8470,9 +7594,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8499,9 +7620,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8528,9 +7646,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8557,9 +7672,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8586,9 +7698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8615,9 +7724,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8644,9 +7750,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8673,9 +7776,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8702,9 +7802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8731,9 +7828,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8760,9 +7854,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8789,9 +7880,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8818,9 +7906,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8847,9 +7932,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8876,9 +7958,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8905,9 +7984,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8934,9 +8010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8963,9 +8036,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8992,9 +8062,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9021,9 +8088,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9050,9 +8114,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9079,9 +8140,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9108,9 +8166,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9137,9 +8192,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9166,9 +8218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9195,9 +8244,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9224,9 +8270,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9253,9 +8296,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9282,9 +8322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9311,9 +8348,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9340,9 +8374,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9369,9 +8400,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9398,9 +8426,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9427,9 +8452,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9456,9 +8478,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9485,9 +8504,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9514,9 +8530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9543,9 +8556,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9572,9 +8582,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9601,9 +8608,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9630,9 +8634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9659,9 +8660,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9688,9 +8686,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9717,9 +8712,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9746,9 +8738,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9775,9 +8764,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8790,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8816,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8842,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +8868,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9912,7 +8894,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9939,7 +8920,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9966,7 +8946,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9993,7 +8972,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10020,7 +8998,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9024,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9050,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9076,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9102,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9128,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9154,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9180,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9206,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9232,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9258,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9284,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10344,7 +9310,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9336,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9362,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9388,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9414,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9440,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9466,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9492,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9518,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10587,7 +9544,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9570,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9596,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9622,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9648,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9674,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9700,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9726,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9752,6 @@ entry: ret 
%1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9778,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9804,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9830,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +9856,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +9882,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10965,7 +9908,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10992,7 +9934,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11019,7 +9960,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11046,7 +9986,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10012,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10038,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10064,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10090,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10116,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10142,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10168,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10194,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10220,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10246,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10272,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10298,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10324,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10350,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10376,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10402,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10428,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10454,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10480,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10506,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10532,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10558,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10584,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10610,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10636,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11748,7 +10662,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10688,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10714,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10740,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10766,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10792,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10818,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11937,7 +10844,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11964,7 +10870,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11991,7 +10896,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12018,7 +10922,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12045,7 +10948,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12072,7 +10974,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11000,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11026,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11052,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11078,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11104,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11130,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11156,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11182,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11208,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11234,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11260,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11286,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11312,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11338,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11364,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11390,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11416,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11442,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11468,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11494,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11520,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11546,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11572,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11598,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11624,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11650,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11676,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11702,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11728,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11754,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11780,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11806,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11832,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12990,7 +11858,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13017,7 +11884,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13044,7 +11910,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13071,7 +11936,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13098,7 +11962,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13125,7 +11988,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12014,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12040,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12066,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12092,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12118,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12144,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12170,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12196,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12222,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12248,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12274,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12300,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12326,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12352,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12378,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12404,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12430,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12456,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12482,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12508,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12534,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12560,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12586,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12612,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12638,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12664,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13854,7 +12690,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13881,7 +12716,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13908,7 +12742,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13935,7 +12768,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13962,7 +12794,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13989,7 +12820,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14016,7 +12846,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14043,7 +12872,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14070,7 +12898,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14097,7 +12924,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14124,7 +12950,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14151,7 +12976,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14178,7 +13002,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14205,7 +13028,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14232,7 +13054,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14259,7 +13080,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14286,7 +13106,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14313,7 +13132,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14340,7 +13158,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14367,7 +13184,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14394,7 +13210,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14421,7 +13236,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14448,7 +13262,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14475,7 +13288,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14502,7 +13314,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14529,7 +13340,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14556,7 +13366,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14583,7 +13392,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14610,7 +13418,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14637,7 +13444,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14664,7 +13470,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14691,7 +13496,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14718,7 +13522,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14745,7 +13548,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14772,7 +13574,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14799,7 +13600,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14826,7 +13626,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14853,7 +13652,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14880,7 +13678,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14907,7 +13704,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14934,7 +13730,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14961,7 +13756,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14988,7 +13782,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15015,7 +13808,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15042,7 +13834,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15069,7 +13860,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15096,7 +13886,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15123,7 +13912,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15150,7 +13938,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15177,7 +13964,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15204,7 +13990,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15231,7 +14016,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15258,7 +14042,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15285,7 +14068,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15312,7 +14094,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15339,7 +14120,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15366,7 +14146,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15393,7 +14172,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15420,7 +14198,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15447,7 +14224,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15474,7 +14250,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15501,7 +14276,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15528,7 +14302,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15555,7 +14328,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15582,7 +14354,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15609,7 +14380,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15636,7 +14406,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15663,7 +14432,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15690,7 +14458,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15717,7 +14484,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15744,7 +14510,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15771,7 +14536,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15798,7 +14562,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15825,7 +14588,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15852,7 +14614,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15879,7 +14640,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15906,7 +14666,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15933,7 +14692,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -15960,7 +14718,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -15987,7 +14744,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16014,7 +14770,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16041,7 +14796,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -16068,7 +14822,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -16095,7 +14848,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -16122,7 +14874,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16149,7 +14900,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16176,7 +14926,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16203,7 +14952,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16230,7 +14978,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16257,7 +15004,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16284,7 +15030,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16311,7 +15056,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16338,7 +15082,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16365,7 +15108,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16392,7 +15134,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16419,7 +15160,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16446,7 +15186,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16473,7 +15212,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16500,7 +15238,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16527,7 +15264,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16554,7 +15290,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16581,7 +15316,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16608,7 +15342,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16635,7 +15368,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16662,7 +15394,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16689,7 +15420,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16716,7 +15446,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16743,7 +15472,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16770,7 +15498,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16797,7 +15524,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16824,7 +15550,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16851,7 +15576,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16878,7 +15602,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16905,7 +15628,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16932,7 +15654,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16959,7 +15680,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16986,7 +15706,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17013,7 +15732,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17040,7 +15758,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17067,7 +15784,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17094,7 +15810,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17121,7 +15836,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17148,7 +15862,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17175,7 +15888,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17202,7 +15914,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17229,7 +15940,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17256,7 +15966,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17283,7 +15992,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17310,7 +16018,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17337,7 +16044,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17364,7 +16070,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17391,7 +16096,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17418,7 +16122,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17445,7 +16148,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17472,7 +16174,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17499,7 +16200,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17526,7 +16226,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17553,7 +16252,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17580,7 +16278,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17607,7 +16304,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17634,7 +16330,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17661,7 +16356,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17688,7 +16382,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17715,7 +16408,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17742,7 +16434,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17769,7 +16460,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17796,7 +16486,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17823,7 +16512,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17850,7 +16538,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17877,7 +16564,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17904,7 +16590,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17931,7 +16616,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17958,7 +16642,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17985,7 +16668,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -18012,7 +16694,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -18039,7 +16720,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -18066,7 +16746,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -18093,7 +16772,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -18120,7 +16798,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -18147,7 +16824,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -18174,7 +16850,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -18201,7 +16876,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -18228,7 +16902,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18255,7 +16928,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll index ac7be3021e633..f7c18141f3abf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vlse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # 
%entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ 
entry: ret %a } -declare @llvm.riscv.vlse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i32( - , - ptr, - 
iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define 
@intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -872,14 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( 
%0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -897,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -919,14 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -944,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -966,14 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -991,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1013,14 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1038,12 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1060,14 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1107,14 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1154,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1179,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1201,14 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1226,12 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1248,14 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1273,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1295,14 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1320,12 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1342,14 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1367,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16: ; 
CHECK: # %bb.0: # %entry @@ -1389,14 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1414,12 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1436,14 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1461,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1483,14 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1508,12 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1530,14 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1555,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1577,14 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1602,12 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1624,14 +1142,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1649,12 +1159,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1671,14 +1175,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1696,12 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1718,14 +1208,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1743,12 +1225,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1765,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1790,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1812,14 +1274,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1837,12 +1291,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1859,14 +1307,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -1884,12 +1324,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1906,14 +1340,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1931,12 +1357,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1953,14 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1978,12 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2000,14 +1406,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll index 1a5574cae96f6..7a25753e2cab9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 
-mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define void @test_vlseg2ff_dead_value(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll index 9086144f85667..d6ece0e8ef1fb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -41,9 +38,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -80,9 +74,6 @@ entry: ret %2 } 
-declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -119,9 +110,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -158,9 +146,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -197,9 +182,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define 
@test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -236,9 +218,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -276,9 +255,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -316,9 +292,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -356,9 +329,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, 
i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -396,9 +366,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -436,9 +403,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -477,9 +441,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # 
%bb.0: # %entry @@ -518,9 +479,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -559,9 +517,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -600,9 +555,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -641,9 +593,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), 
ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -683,9 +632,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -725,9 +671,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -767,9 +710,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -809,9 +749,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -852,9 +789,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -895,9 +829,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -938,9 +869,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -981,9 +909,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -1025,9 +950,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1069,9 +991,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1113,9 +1032,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1157,9 +1073,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1115,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1247,9 +1157,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1292,9 +1199,6 @@ entry: ret %2 } -declare 
{target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1337,8 +1241,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -1375,8 +1277,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1413,8 +1313,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1451,8 +1349,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1489,8 +1385,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1421,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1566,8 +1458,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1605,8 +1495,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1644,8 +1532,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1683,8 +1569,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1723,8 +1607,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1763,8 +1645,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1803,8 +1683,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1843,8 +1721,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1884,8 +1760,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1799,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1966,8 +1838,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2008,8 +1878,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1918,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2092,8 +1958,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2135,8 +1999,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2178,8 +2040,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2221,8 +2081,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2265,8 +2123,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2309,8 +2165,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2353,8 +2207,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2391,8 +2243,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2429,8 +2279,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2467,8 +2315,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2505,8 +2351,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2544,8 +2388,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2583,8 +2425,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2622,8 +2462,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2662,8 +2500,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2702,8 +2538,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2742,8 +2576,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2783,8 +2615,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2824,8 +2654,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2866,8 +2694,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2908,8 +2734,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2951,8 +2775,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2994,8 +2816,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3038,8 +2858,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3082,8 +2900,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3120,8 +2936,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3158,8 +2972,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3196,8 +3008,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3235,8 +3045,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3274,8 +3082,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3314,8 +3120,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3354,8 +3158,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3395,8 +3197,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3437,8 +3237,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3480,8 +3278,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3320,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3561,7 +3356,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3598,7 +3392,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3635,7 +3428,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3672,7 +3464,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3709,7 +3500,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3747,7 +3537,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3574,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3611,6 @@ entry: ret %2 } - define 
@test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3861,7 +3648,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3686,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3939,7 +3724,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3978,7 +3762,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4017,7 +3800,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -4057,7 +3839,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4097,7 +3878,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4137,7 +3917,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: 
# %bb.0: # %entry @@ -4178,7 +3957,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4219,7 +3997,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4260,7 +4037,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4302,7 +4078,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4344,7 +4119,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4386,7 +4160,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4429,7 +4202,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4472,7 +4244,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -4515,7 +4286,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -4552,7 +4322,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -4589,7 +4358,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -4626,7 +4394,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -4663,7 +4430,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -4701,7 +4467,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -4739,7 +4504,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -4777,7 +4541,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -4816,7 +4579,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -4855,7 +4617,6 @@ entry: ret %2 } - define 
@test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4894,7 +4655,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4934,7 +4694,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4974,7 +4733,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -5015,7 +4773,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5056,7 +4813,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -5098,7 +4854,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5140,7 +4895,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -5183,7 +4937,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # 
%bb.0: # %entry @@ -5226,7 +4979,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5263,7 +5015,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5300,7 +5051,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5337,7 +5087,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5375,7 +5124,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5413,7 +5161,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -5452,7 +5199,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -5491,7 +5237,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -5531,7 +5276,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5572,7 +5316,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5614,7 +5357,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5657,7 +5399,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -5694,7 +5435,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -5731,7 +5471,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5768,7 +5507,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5805,7 +5543,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5842,7 +5579,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -5880,7 +5616,6 @@ entry: ret %2 } - define 
@test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -5918,7 +5653,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5956,7 +5690,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5994,7 +5727,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -6033,7 +5765,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -6072,7 +5803,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -6111,7 +5841,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -6150,7 +5879,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -6190,7 +5918,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -6230,7 +5957,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -6270,7 +5996,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -6311,7 +6036,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -6352,7 +6076,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -6393,7 +6116,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -6435,7 +6157,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -6477,7 +6198,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -6519,7 +6239,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -6562,7 +6281,6 @@ entry: ret %2 } - define 
@test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -6605,7 +6323,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll index 813208c534e31..a6100d9737010 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define void @test_vlseg2ff_dead_value(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll index 7ddae4293c29f..1f763ce6b2474 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define 
@test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -41,9 +38,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -80,9 +74,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -119,9 +110,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -158,9 +146,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, 
i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -197,9 +182,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -236,9 +218,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -276,9 +255,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # 
%bb.0: # %entry @@ -316,9 +292,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -356,9 +329,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -396,9 +366,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -436,9 +403,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), 
ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -477,9 +441,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -518,9 +479,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -559,9 +517,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -600,9 +555,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -641,9 +593,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -683,9 +632,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -725,9 +671,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -767,9 +710,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -809,9 +749,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -852,9 +789,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -895,9 +829,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -938,9 +869,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -981,9 +909,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -1025,9 +950,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1069,9 +991,6 @@ entry: ret %2 } -declare 
{target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1113,9 +1032,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1157,9 +1073,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1115,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define 
@test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1247,9 +1157,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1292,9 +1199,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1337,8 +1241,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -1375,8 +1277,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1413,8 +1313,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1451,8 +1349,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1489,8 +1385,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1421,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1566,8 +1458,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1605,8 +1495,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1644,8 +1532,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1683,8 +1569,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1723,8 +1607,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1763,8 +1645,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1803,8 +1683,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1843,8 +1721,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1884,8 +1760,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1799,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1966,8 +1838,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2008,8 +1878,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1918,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2092,8 +1958,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2135,8 +1999,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2178,8 +2040,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2221,8 +2081,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2265,8 +2123,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2309,8 +2165,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2353,8 +2207,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2391,8 +2243,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2429,8 +2279,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2467,8 +2315,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2505,8 +2351,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2544,8 +2388,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2583,8 +2425,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2622,8 +2462,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2662,8 +2500,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2702,8 +2538,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2742,8 +2576,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2783,8 +2615,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2824,8 +2654,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2866,8 +2694,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2908,8 +2734,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2951,8 +2775,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2994,8 +2816,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3038,8 +2858,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3082,8 +2900,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3120,8 +2936,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3158,8 +2972,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3196,8 +3008,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { 
; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3235,8 +3045,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3274,8 +3082,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3314,8 +3120,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3354,8 +3158,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3395,8 +3197,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3437,8 +3237,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3480,8 +3278,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3320,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3561,7 +3356,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3598,7 +3392,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3635,7 +3428,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3672,7 +3464,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3709,7 +3500,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3747,7 +3537,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3574,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3611,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3861,7 +3648,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3686,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3939,7 +3724,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3978,7 +3762,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4017,7 +3800,6 @@ entry: ret %2 } - define 
@test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -4057,7 +3839,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4097,7 +3878,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4137,7 +3917,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4178,7 +3957,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4219,7 +3997,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4260,7 +4037,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4302,7 +4078,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4344,7 +4119,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # 
%bb.0: # %entry @@ -4386,7 +4160,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4429,7 +4202,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4472,7 +4244,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -4515,7 +4286,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -4552,7 +4322,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -4589,7 +4358,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -4626,7 +4394,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -4663,7 +4430,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -4701,7 +4467,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -4739,7 +4504,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -4777,7 +4541,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -4816,7 +4579,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -4855,7 +4617,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4894,7 +4655,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4934,7 +4694,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4974,7 +4733,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -5015,7 +4773,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5056,7 +4813,6 @@ entry: ret %2 } - define 
@test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -5098,7 +4854,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5140,7 +4895,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -5183,7 +4937,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5226,7 +4979,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5263,7 +5015,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5300,7 +5051,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5337,7 +5087,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5375,7 +5124,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; 
CHECK: # %bb.0: # %entry @@ -5413,7 +5161,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -5452,7 +5199,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -5491,7 +5237,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -5531,7 +5276,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5572,7 +5316,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5614,7 +5357,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5657,7 +5399,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -5694,7 +5435,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -5731,7 +5471,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5768,7 +5507,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5805,7 +5543,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5842,7 +5579,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -5880,7 +5616,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -5918,7 +5653,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5956,7 +5690,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5994,7 +5727,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -6033,7 +5765,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -6072,7 +5803,6 @@ entry: ret %2 } - define 
@test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -6111,7 +5841,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -6150,7 +5879,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -6190,7 +5918,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -6230,7 +5957,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -6270,7 +5996,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -6311,7 +6036,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -6352,7 +6076,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -6393,7 +6116,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -6435,7 +6157,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -6477,7 +6198,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -6519,7 +6239,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -6562,7 +6281,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -6605,7 +6323,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll index 0fa51c56a9a86..3f6b65b99be91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -29,9 +26,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -56,9 +50,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -83,9 +74,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -110,9 +98,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -137,9 +122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -164,9 +146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -191,9 +170,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -218,9 +194,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -245,9 +218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -272,9 +242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr 
%base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -326,9 +290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -353,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -380,9 +338,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -407,9 +362,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -434,9 +386,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -461,9 +410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -488,9 +434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -515,9 +458,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -542,9 +482,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -569,9 +506,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -596,9 +530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -623,9 +554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -650,9 +578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -704,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -731,9 +650,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -758,9 +674,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -785,9 +698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -812,9 +722,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -839,9 +746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -866,8 +770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -892,8 +794,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -918,8 +818,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, 
i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -944,8 +842,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -970,8 +866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -996,8 +890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1022,8 +914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1048,8 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - 
define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1074,8 +962,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1100,8 +986,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1126,8 +1010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1152,8 +1034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1178,8 +1058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1230,8 +1106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1256,8 +1130,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1154,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1308,8 +1178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1334,8 +1202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1360,8 +1226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1386,8 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1412,8 +1274,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1438,8 +1298,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1464,8 +1322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1490,8 +1346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1516,8 +1370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1542,8 +1394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1568,8 +1418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1442,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1620,8 +1466,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1646,8 +1490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1672,8 +1514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1698,8 +1538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1724,8 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1750,8 +1586,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1776,8 +1610,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1802,8 +1634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1828,8 +1658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1854,8 +1682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1880,8 +1706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1932,8 +1754,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1958,8 +1778,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1984,8 +1802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2010,8 +1826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2036,8 +1850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2062,8 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2088,8 +1898,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2114,8 +1922,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2140,8 +1946,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2166,8 +1970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2192,8 +1994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2218,8 +2018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2244,8 +2042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2270,7 +2066,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2295,7 +2090,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2320,7 +2114,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2345,7 +2138,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2370,7 +2162,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2395,7 +2186,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2210,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2445,7 +2234,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2470,7 +2258,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2495,7 +2282,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2306,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2545,7 +2330,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2570,7 +2354,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2595,7 +2378,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2620,7 +2402,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: 
# %entry @@ -2645,7 +2426,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2670,7 +2450,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2695,7 +2474,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2720,7 +2498,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2745,7 +2522,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2770,7 +2546,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2795,7 +2570,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2820,7 +2594,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2845,7 +2618,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2870,7 +2642,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2895,7 +2666,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2920,7 +2690,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2945,7 +2714,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2970,7 +2738,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2762,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3020,7 +2786,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3045,7 +2810,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3070,7 +2834,6 @@ entry: ret %1 } - define 
@test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2858,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3120,7 +2882,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3145,7 +2906,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3170,7 +2930,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3195,7 +2954,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3220,7 +2978,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3245,7 +3002,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3270,7 +3026,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # 
%entry @@ -3295,7 +3050,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3320,7 +3074,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3345,7 +3098,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3370,7 +3122,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3395,7 +3146,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3420,7 +3170,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3445,7 +3194,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3470,7 +3218,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3495,7 +3242,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3520,7 +3266,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3545,7 +3290,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3314,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3595,7 +3338,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3620,7 +3362,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3645,7 +3386,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3410,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3695,7 +3434,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3720,7 +3458,6 @@ entry: ret %1 } - define 
@test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3745,7 +3482,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3770,7 +3506,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3795,7 +3530,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3820,7 +3554,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3845,7 +3578,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3870,7 +3602,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3895,7 +3626,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3920,7 +3650,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3945,7 +3674,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3970,7 +3698,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3995,7 +3722,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4020,7 +3746,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4045,7 +3770,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4070,7 +3794,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4095,7 +3818,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4120,7 +3842,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4145,7 +3866,6 @@ entry: ret %1 } - define 
@test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4170,7 +3890,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4195,7 +3914,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4220,7 +3938,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll index d2cb825f9426c..aa4c3e40a04cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -29,9 +26,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -56,9 +50,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -83,9 +74,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -110,9 +98,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -137,9 +122,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -164,9 +146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -191,9 +170,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -218,9 +194,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr 
%base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -245,9 +218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -272,9 +242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -326,9 +290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -353,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -380,9 +338,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -407,9 +362,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -434,9 +386,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -461,9 +410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -488,9 +434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -515,9 +458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 
%offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -542,9 +482,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -569,9 +506,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -596,9 +530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -623,9 +554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -650,9 +578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -704,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -731,9 +650,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -758,9 +674,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -785,9 +698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -812,9 +722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 
%offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -839,9 +746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -866,8 +770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -892,8 +794,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -918,8 +818,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -944,8 +842,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -970,8 +866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -996,8 +890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1022,8 +914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1048,8 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1074,8 +962,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1100,8 +986,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1126,8 +1010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1152,8 +1034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1178,8 +1058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1230,8 +1106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1256,8 +1130,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1154,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1308,8 +1178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1334,8 +1202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1360,8 +1226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1386,8 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1412,8 +1274,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1438,8 +1298,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1464,8 +1322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1490,8 +1346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1516,8 +1370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1542,8 +1394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1568,8 +1418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1442,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1620,8 +1466,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1646,8 +1490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1672,8 +1514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1698,8 +1538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1724,8 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1750,8 +1586,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1776,8 +1610,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1802,8 +1634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1828,8 +1658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1854,8 +1682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1880,8 +1706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1932,8 +1754,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1958,8 +1778,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1984,8 +1802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2010,8 +1826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2036,8 +1850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2062,8 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2088,8 +1898,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2114,8 +1922,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2140,8 +1946,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2166,8 +1970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2192,8 +1994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2218,8 +2018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2244,8 +2042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2270,7 +2066,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2295,7 +2090,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2320,7 +2114,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2345,7 +2138,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2370,7 +2162,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2395,7 +2186,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2210,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2445,7 +2234,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2470,7 +2258,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2495,7 +2282,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2306,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2545,7 +2330,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2570,7 +2354,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2595,7 +2378,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2620,7 +2402,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2645,7 +2426,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: 
# %entry @@ -2670,7 +2450,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2695,7 +2474,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2720,7 +2498,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2745,7 +2522,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2770,7 +2546,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2795,7 +2570,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2820,7 +2594,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2845,7 +2618,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2870,7 +2642,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2895,7 +2666,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2920,7 +2690,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2945,7 +2714,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2970,7 +2738,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2762,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3020,7 +2786,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3045,7 +2810,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3070,7 +2834,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2858,6 @@ entry: ret %1 } - define 
@test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3120,7 +2882,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3145,7 +2906,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3170,7 +2930,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3195,7 +2954,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3220,7 +2978,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3245,7 +3002,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3270,7 +3026,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3295,7 +3050,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # 
%entry @@ -3320,7 +3074,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3345,7 +3098,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3370,7 +3122,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3395,7 +3146,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3420,7 +3170,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3445,7 +3194,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3470,7 +3218,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3495,7 +3242,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3520,7 +3266,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3545,7 +3290,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3314,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3595,7 +3338,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3620,7 +3362,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3645,7 +3386,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3410,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3695,7 +3434,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3720,7 +3458,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3745,7 +3482,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3770,7 +3506,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3795,7 +3530,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3820,7 +3554,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3845,7 +3578,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3870,7 +3602,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3895,7 +3626,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3920,7 +3650,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3945,7 +3674,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: 
; CHECK: # %bb.0: # %entry @@ -3970,7 +3698,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3995,7 +3722,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4020,7 +3746,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4045,7 +3770,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4070,7 +3794,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4095,7 +3818,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4120,7 +3842,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4145,7 +3866,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4170,7 +3890,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) 
{ ; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4195,7 +3914,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4220,7 +3938,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll index 474b24c15db80..720e9759e52ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1175,14 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1271,14 +901,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1296,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1319,14 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1344,12 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define 
@intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll index 520b75f30d140..2360cc1f9dd4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 
+344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -790,14 +560,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -815,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -838,14 +594,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -863,12 +611,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -886,14 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -911,12 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -934,14 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -959,12 +679,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -982,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1007,12 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1030,14 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1055,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1078,14 +764,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1103,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1126,14 +798,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1151,12 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1222,14 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1247,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32( - , - ptr, - 
, - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1318,14 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1343,12 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1554,14 +1100,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1579,12 +1117,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1601,14 +1133,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1626,12 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1649,14 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1674,12 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1697,14 +1201,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,12 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1745,14 +1235,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1770,12 +1252,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1793,14 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1818,12 +1286,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1841,14 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1866,12 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1889,14 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1914,12 +1354,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1937,14 +1371,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1962,12 +1388,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1985,14 +1405,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2010,12 +1422,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2033,14 +1439,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2058,12 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2081,14 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2106,12 +1490,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2128,14 +1506,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2153,12 +1523,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2175,14 +1539,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2200,12 +1556,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2222,14 +1572,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2247,12 +1589,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2269,14 +1605,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2294,12 +1622,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2316,14 +1638,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2341,12 +1655,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2363,14 +1671,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2388,12 +1688,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2411,14 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2436,12 +1722,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2459,14 +1739,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2484,12 +1756,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2507,14 +1773,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,12 +1790,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( - , 
- ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2555,14 +1807,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1824,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2603,14 +1841,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2628,12 +1858,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2651,14 +1875,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2676,12 +1892,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -2699,14 +1909,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2724,12 +1926,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2747,14 +1943,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2772,12 +1960,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2795,14 +1977,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2820,12 +1994,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2842,14 +2010,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2867,12 +2027,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2889,14 +2043,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2914,12 +2060,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2936,14 +2076,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2961,12 +2093,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2983,14 +2109,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3008,12 +2126,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3030,14 +2142,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3055,12 +2159,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -3077,14 +2175,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -3102,12 +2192,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3125,14 +2209,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3150,12 +2226,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3173,14 +2243,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3198,12 +2260,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3221,14 +2277,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3246,12 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3269,14 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3294,12 +2328,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3317,14 +2345,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - 
iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3342,12 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3365,14 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3390,12 +2396,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3413,14 +2413,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3438,12 +2430,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3461,14 +2447,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # 
%entry @@ -3486,12 +2464,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3509,14 +2481,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3534,12 +2498,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3556,14 +2514,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3581,12 +2531,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3603,14 +2547,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3628,12 +2564,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3650,14 +2580,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3675,12 +2597,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3697,14 +2613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3722,12 +2630,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3744,14 +2646,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3769,12 +2663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3791,14 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3816,12 +2696,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3838,14 +2712,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3863,12 +2729,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3886,14 +2746,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3911,12 +2763,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3934,14 +2780,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3959,12 +2797,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3982,14 +2814,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4007,12 +2831,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4030,14 +2848,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4055,12 +2865,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4078,14 +2882,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4103,12 +2899,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4126,14 +2916,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4151,12 +2933,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4174,14 +2950,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4199,12 +2967,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4222,14 +2984,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,12 +3001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4270,14 +3018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4295,12 +3035,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4318,14 +3052,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4343,12 +3069,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4366,14 +3086,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4391,12 +3103,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4414,14 +3120,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,12 +3137,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4462,14 +3154,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4487,12 +3171,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4510,14 +3188,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4535,12 +3205,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4558,14 +3222,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4583,12 +3239,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4606,14 +3256,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4631,12 +3273,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4654,14 +3290,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4679,12 +3307,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4702,14 +3324,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,12 +3341,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4750,14 +3358,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4775,12 +3375,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4798,14 +3392,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4823,12 +3409,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4846,14 +3426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4871,12 +3443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4894,14 +3460,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-4919,12 +3477,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4942,14 +3494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4967,12 +3511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4990,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5015,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5038,14 +3562,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5063,12 +3579,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5086,14 +3596,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5111,12 +3613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5134,14 +3630,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5159,12 +3647,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5182,14 +3664,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5207,12 +3681,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5230,14 +3698,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5255,12 +3715,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5278,14 +3732,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll index 0c9aa28d3b137..77572b597ccf5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # 
%bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; 
CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7455,7 +6684,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7482,7 +6710,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7509,7 +6736,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7536,7 +6762,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7563,7 +6788,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7590,7 +6814,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7617,7 +6840,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7644,7 +6866,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -7671,7 +6892,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7698,7 +6918,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7725,7 +6944,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7752,7 +6970,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7779,7 +6996,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -7806,7 +7022,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -7833,7 +7048,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -7860,7 +7074,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7887,7 +7100,6 @@ entry: ret %1 } 
- define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7914,7 +7126,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7941,7 +7152,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7178,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7995,7 +7204,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8022,7 +7230,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8049,7 +7256,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8076,7 +7282,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8103,7 +7308,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8130,7 +7334,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8157,7 +7360,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8184,7 +7386,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8211,7 +7412,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8238,7 +7438,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8265,7 +7464,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8292,7 +7490,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8319,7 +7516,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8346,7 +7542,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8373,7 +7568,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8400,7 +7594,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8427,7 +7620,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8454,7 +7646,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8481,7 +7672,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8508,7 +7698,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8535,7 +7724,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -8562,7 +7750,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8589,7 +7776,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8616,7 +7802,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8643,7 +7828,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8670,7 +7854,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8697,7 +7880,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8724,7 +7906,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7932,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8778,7 +7958,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7984,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8832,7 +8010,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8859,7 +8036,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8886,7 +8062,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8913,7 +8088,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8940,7 +8114,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8967,7 +8140,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8994,7 +8166,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9021,7 +8192,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9048,7 +8218,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9075,7 +8244,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9102,7 +8270,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9129,7 +8296,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9156,7 +8322,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9183,7 +8348,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8374,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9237,7 +8400,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9264,7 +8426,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9291,7 +8452,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9318,7 +8478,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9345,7 +8504,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8530,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9399,7 +8556,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8582,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -9453,7 +8608,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9480,7 +8634,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9507,7 +8660,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9534,7 +8686,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9561,7 +8712,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9588,7 +8738,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9615,7 +8764,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9642,7 +8790,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9669,7 +8816,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9696,7 +8842,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9723,7 +8868,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9750,7 +8894,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9777,7 +8920,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8946,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8972,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8998,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +9024,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9912,7 +9050,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9939,7 +9076,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9966,7 +9102,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9993,7 +9128,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10020,7 +9154,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9180,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9206,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9232,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9258,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9284,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9310,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9336,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9362,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9388,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9414,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9440,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -10344,7 +9466,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9492,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9518,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9544,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9570,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9596,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9622,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9648,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9674,6 @@ entry: ret %1 } - 
define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10587,7 +9700,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9726,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9752,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9778,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9804,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9830,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9856,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9882,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9908,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9934,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9960,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9986,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +10012,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +10038,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10965,7 +10064,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10992,7 +10090,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11019,7 +10116,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11046,7 +10142,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10168,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10194,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10220,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10246,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10272,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10298,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10324,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10350,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10376,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10402,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10428,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10454,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10480,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10506,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10532,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10558,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10584,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10610,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10636,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10662,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10688,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10714,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10740,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10766,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10792,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11748,7 +10818,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10844,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10870,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10896,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10922,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10948,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10974,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11937,7 +11000,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11964,7 +11026,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11991,7 +11052,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12018,7 +11078,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12045,7 +11104,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12072,7 +11130,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11156,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11182,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11208,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11234,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11260,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11286,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11312,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11338,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11364,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11390,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11416,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11442,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11468,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11494,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11520,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11546,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11572,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11598,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11624,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11650,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11676,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11702,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11728,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11754,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11780,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11806,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11832,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11858,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11884,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11910,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11936,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11962,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11988,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12990,7 +12014,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13017,7 +12040,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13044,7 +12066,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13071,7 +12092,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13098,7 +12118,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13125,7 +12144,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12170,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12196,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12222,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12248,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12274,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12300,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12326,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12352,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12378,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12404,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12430,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12456,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12482,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12508,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12534,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12560,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12586,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12612,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12638,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12664,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12690,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12716,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12742,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12768,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12794,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12820,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll index cfe5ab2b07e64..0e43923294137 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -44,9 +41,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -73,9 +67,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -102,9 +93,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -131,9 +119,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -160,9 +145,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -189,9 +171,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -218,9 +197,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -247,9 +223,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -276,9 +249,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -305,9 +275,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -334,9 +301,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -363,9 +327,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -392,9 +353,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -421,9 +379,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -450,9 +405,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -479,9 +431,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -508,9 +457,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -537,9 +483,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,9 +509,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -595,9 +535,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -624,9 +561,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -666,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -695,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -724,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -753,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -782,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -811,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -869,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -898,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -956,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -985,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1043,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1072,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1130,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1159,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1188,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1230,9 +1107,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1259,9 +1133,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1288,9 +1159,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1317,9 +1185,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1346,9 +1211,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1375,9 +1237,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1404,9 +1263,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1433,9 +1289,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1462,9 +1315,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1491,9 +1341,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1520,9 +1367,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1549,9 +1393,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1578,9 +1419,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1607,9 +1445,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1636,9 +1471,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1665,9 +1497,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1694,9 +1523,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1723,9 +1549,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1575,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1794,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1823,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1881,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1910,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1939,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1968,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1997,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2026,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2055,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2084,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2113,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2142,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2171,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2200,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2229,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2271,9 +2043,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2300,9 +2069,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2329,9 +2095,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2358,9 +2121,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2387,9 +2147,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2416,9 +2173,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2445,9 +2199,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2474,9 +2225,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2503,9 +2251,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,9 +2277,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2561,9 +2303,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2590,9 +2329,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2619,9 +2355,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,9 +2381,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2407,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2706,9 +2433,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2748,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2806,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2835,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2864,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2893,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2922,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2951,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2980,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3009,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3038,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3067,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3096,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3125,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3154,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3183,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3225,9 +2901,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3254,9 +2927,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3283,9 +2953,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3312,9 +2979,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3341,9 +3005,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3370,9 +3031,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3399,9 +3057,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3428,9 +3083,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3457,9 +3109,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3486,9 +3135,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3515,9 +3161,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3544,9 +3187,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3573,9 +3213,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3239,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3631,9 +3265,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3660,9 +3291,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,9 +3317,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3718,9 +3343,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3747,9 +3369,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3776,9 +3395,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3805,9 +3421,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3834,9 +3447,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3863,9 +3473,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3892,9 +3499,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3921,9 +3525,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3950,9 +3551,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3979,9 +3577,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4008,9 +3603,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4037,9 +3629,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4066,9 +3655,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4095,9 +3681,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4124,9 +3707,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4153,9 +3733,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -4182,9 +3759,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4211,9 +3785,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), 
ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4240,9 +3811,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4269,9 +3837,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4298,9 +3863,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3889,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4356,9 +3915,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4385,9 +3941,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4414,9 +3967,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4443,9 +3993,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4472,9 +4019,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4501,9 +4045,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4530,9 +4071,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4559,9 +4097,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4588,9 +4123,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4617,9 +4149,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4646,9 +4175,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4675,9 +4201,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4704,9 +4227,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4733,9 +4253,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4762,9 +4279,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4791,9 +4305,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4820,9 +4331,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4849,9 +4357,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4878,9 +4383,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4907,9 +4409,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4936,9 +4435,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4965,9 +4461,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4994,9 +4487,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5023,9 +4513,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4539,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5081,9 +4565,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5110,9 +4591,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5139,9 +4617,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5168,9 +4643,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5197,9 +4669,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5226,9 +4695,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5255,9 +4721,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5284,9 +4747,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5313,9 +4773,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5342,9 +4799,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5371,9 +4825,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5400,9 +4851,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5429,9 +4877,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5458,9 +4903,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5487,9 +4929,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5516,9 +4955,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5545,9 +4981,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5574,9 +5007,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5603,9 +5033,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5632,9 +5059,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5661,9 +5085,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5690,9 +5111,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5719,9 +5137,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5748,9 +5163,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5189,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5806,9 +5215,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5835,9 +5241,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5864,9 +5267,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5893,9 +5293,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5922,9 +5319,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5951,9 +5345,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5980,9 +5371,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6009,9 +5397,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6038,9 +5423,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6067,9 +5449,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6096,9 +5475,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6125,9 +5501,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6154,9 +5527,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6183,9 +5553,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6212,9 +5579,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6241,9 +5605,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6270,9 +5631,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6299,9 +5657,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6328,9 +5683,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6357,9 +5709,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6386,9 +5735,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6415,9 +5761,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6444,9 +5787,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6473,9 +5813,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6502,9 +5839,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6531,9 +5865,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6560,9 +5891,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6589,9 +5917,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6618,9 +5943,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6647,9 +5969,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6676,9 +5995,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6705,9 +6021,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6734,9 +6047,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6763,9 +6073,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6792,9 +6099,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6821,9 +6125,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6850,9 +6151,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6879,9 +6177,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6908,9 +6203,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6937,9 +6229,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6966,9 +6255,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6995,9 +6281,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7024,9 +6307,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7053,9 +6333,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7082,9 +6359,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7111,9 +6385,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,9 +6411,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7169,9 +6437,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7198,9 +6463,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7227,9 +6489,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7256,9 +6515,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7285,9 +6541,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7314,9 +6567,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7343,9 +6593,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7372,9 +6619,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7401,9 +6645,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7430,9 +6671,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7459,9 +6697,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7488,9 +6723,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7517,9 +6749,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7546,9 +6775,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7575,9 +6801,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7604,9 +6827,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7633,9 +6853,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7662,9 +6879,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7691,9 +6905,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7720,9 +6931,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7749,9 +6957,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7778,9 +6983,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7807,9 +7009,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7836,9 +7035,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7865,9 +7061,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7894,9 +7087,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7923,9 +7113,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7952,9 +7139,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7981,9 +7165,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8010,9 +7191,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8039,9 +7217,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8068,9 +7243,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8097,9 +7269,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8126,9 +7295,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8155,9 +7321,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8184,9 +7347,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8213,9 +7373,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8242,9 +7399,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8271,9 +7425,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8300,9 +7451,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8329,9 +7477,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8358,9 +7503,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8387,9 +7529,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8416,9 +7555,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8445,9 +7581,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8474,9 +7607,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8503,9 +7633,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8532,9 +7659,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8561,9 +7685,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8590,9 +7711,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8619,9 +7737,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8648,9 +7763,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8677,9 +7789,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8706,9 +7815,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8735,9 +7841,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8764,9 +7867,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8793,9 +7893,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8822,9 +7919,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8851,9 +7945,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8880,9 +7971,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8909,9 +7997,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8938,9 +8023,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8967,9 +8049,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8996,9 +8075,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9025,9 +8101,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9054,9 +8127,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9083,9 +8153,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9112,9 +8179,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9141,9 +8205,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9170,9 +8231,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9199,9 +8257,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9228,9 +8283,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9257,9 +8309,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9286,9 +8335,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9315,9 +8361,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9344,9 +8387,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9373,9 +8413,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9402,9 +8439,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9431,9 +8465,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9460,9 +8491,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9489,9 +8517,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9518,9 +8543,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9547,9 +8569,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9576,9 +8595,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9605,9 +8621,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9634,9 +8647,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9663,9 +8673,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9692,9 +8699,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9721,9 +8725,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9750,9 +8751,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9779,9 +8777,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9808,9 +8803,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9837,9 +8829,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9866,9 +8855,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9895,7 +8881,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9922,7 +8907,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9949,7 +8933,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9976,7 +8959,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10003,7 +8985,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10030,7 +9011,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10057,7 +9037,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9063,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10111,7 +9089,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -10138,7 +9115,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10165,7 +9141,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10192,7 +9167,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10219,7 +9193,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10246,7 +9219,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10273,7 +9245,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10300,7 +9271,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10327,7 +9297,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10354,7 +9323,6 @@ entry: ret %1 
} - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10381,7 +9349,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10408,7 +9375,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10435,7 +9401,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10462,7 +9427,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10489,7 +9453,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10516,7 +9479,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10543,7 +9505,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10570,7 +9531,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10597,7 +9557,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10624,7 +9583,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10651,7 +9609,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10678,7 +9635,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9661,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10732,7 +9687,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10759,7 +9713,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10786,7 +9739,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10813,7 +9765,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10840,7 +9791,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10867,7 +9817,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10894,7 +9843,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10921,7 +9869,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10948,7 +9895,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10975,7 +9921,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11002,7 +9947,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11029,7 +9973,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11056,7 +9999,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11083,7 +10025,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11110,7 +10051,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11137,7 +10077,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11164,7 +10103,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11191,7 +10129,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11218,7 +10155,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11245,7 +10181,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11272,7 +10207,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11299,7 +10233,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10259,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11353,7 +10285,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11380,7 +10311,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11407,7 +10337,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11434,7 +10363,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11461,7 +10389,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11488,7 +10415,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11515,7 +10441,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11542,7 +10467,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11569,7 +10493,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11596,7 +10519,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11623,7 +10545,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11650,7 +10571,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11677,7 +10597,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11704,7 +10623,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11731,7 +10649,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11758,7 +10675,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11785,7 +10701,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11812,7 +10727,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11839,7 +10753,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11866,7 +10779,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11893,7 +10805,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11920,7 +10831,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11947,7 +10857,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11974,7 +10883,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12001,7 +10909,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12028,7 +10935,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12055,7 +10961,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12082,7 +10987,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12109,7 +11013,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12136,7 +11039,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12163,7 +11065,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12190,7 +11091,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12217,7 +11117,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12244,7 +11143,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12271,7 +11169,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12298,7 +11195,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12325,7 +11221,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12352,7 +11247,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12379,7 +11273,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12406,7 +11299,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12433,7 +11325,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12460,7 +11351,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12487,7 +11377,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12514,7 +11403,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12541,7 +11429,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12568,7 +11455,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12595,7 +11481,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12622,7 +11507,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12649,7 +11533,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12676,7 +11559,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12703,7 +11585,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12730,7 +11611,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12757,7 +11637,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12784,7 +11663,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12811,7 +11689,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12838,7 +11715,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12865,7 +11741,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12892,7 +11767,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12919,7 +11793,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12946,7 +11819,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12973,7 +11845,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13000,7 +11871,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13027,7 +11897,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13054,7 +11923,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13081,7 +11949,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13108,7 +11975,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13135,7 +12001,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13162,7 +12027,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13189,7 +12053,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13216,7 +12079,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13243,7 +12105,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13270,7 +12131,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13297,7 +12157,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13324,7 +12183,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13351,7 +12209,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13378,7 +12235,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13405,7 +12261,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13432,7 +12287,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13459,7 +12313,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13486,7 +12339,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13513,7 +12365,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13540,7 +12391,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13567,7 +12417,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13594,7 +12443,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13621,7 +12469,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13648,7 +12495,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13675,7 +12521,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13702,7 +12547,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13729,7 +12573,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13756,7 +12599,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13783,7 +12625,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13810,7 +12651,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13837,7 +12677,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13864,7 +12703,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13891,7 +12729,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13918,7 +12755,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13945,7 +12781,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13972,7 +12807,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13999,7 +12833,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14026,7 +12859,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14053,7 +12885,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14080,7 +12911,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14107,7 +12937,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14134,7 +12963,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14161,7 +12989,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14188,7 +13015,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14215,7 +13041,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14242,7 +13067,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14269,7 +13093,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14296,7 +13119,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14323,7 +13145,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14350,7 +13171,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14377,7 +13197,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14404,7 +13223,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14431,7 +13249,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14458,7 +13275,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14485,7 +13301,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14512,7 +13327,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14539,7 +13353,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14566,7 +13379,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14593,7 +13405,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14620,7 +13431,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14647,7 +13457,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14674,7 +13483,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14701,7 +13509,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14728,7 +13535,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14755,7 +13561,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14782,7 +13587,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14809,7 +13613,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14836,7 +13639,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14863,7 +13665,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14890,7 +13691,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14917,7 +13717,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14944,7 +13743,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14971,7 +13769,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14998,7 +13795,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15025,7 +13821,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15052,7 +13847,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15079,7 +13873,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15106,7 +13899,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15133,7 +13925,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15160,7 +13951,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15187,7 +13977,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15214,7 +14003,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15241,7 +14029,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15268,7 +14055,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15295,7 +14081,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15322,7 +14107,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15349,7 +14133,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15376,7 +14159,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15403,7 +14185,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15430,7 +14211,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15457,7 +14237,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15484,7 +14263,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15511,7 +14289,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15538,7 +14315,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15565,7 +14341,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15592,7 +14367,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15619,7 +14393,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15646,7 +14419,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15673,7 +14445,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15700,7 +14471,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15727,7 +14497,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15754,7 +14523,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15781,7 +14549,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15808,7 +14575,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15835,7 +14601,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15862,7 +14627,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15889,7 +14653,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15916,7 +14679,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15943,7 +14705,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15970,7 +14731,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15997,7 +14757,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16024,7 +14783,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16051,7 +14809,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16078,7 +14835,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16105,7 +14861,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16132,7 +14887,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -16159,7 +14913,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -16186,7 +14939,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -16213,7 +14965,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16240,7 +14991,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16267,7 +15017,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16294,7 +15043,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16321,7 +15069,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16348,7 +15095,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16375,7 +15121,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16402,7 +15147,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16429,7 +15173,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16456,7 +15199,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16483,7 +15225,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16510,7 +15251,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16537,7 +15277,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16564,7 +15303,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16591,7 +15329,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16618,7 +15355,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16645,7 +15381,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16672,7 +15407,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16699,7 +15433,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16726,7 +15459,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16753,7 +15485,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16780,7 +15511,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16807,7 +15537,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16834,7 +15563,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16861,7 +15589,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16888,7 +15615,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16915,7 +15641,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16942,7 +15667,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16969,7 +15693,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16996,7 +15719,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -17023,7 +15745,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -17050,7 +15771,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -17077,7 +15797,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17104,7 +15823,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17131,7 +15849,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17158,7 +15875,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17185,7 +15901,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17212,7 +15927,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17239,7 +15953,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17266,7 +15979,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17293,7 +16005,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17320,7 +16031,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17347,7 +16057,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17374,7 +16083,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17401,7 +16109,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17428,7 +16135,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17455,7 +16161,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17482,7 +16187,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17509,7 +16213,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17536,7 +16239,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17563,7 +16265,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17590,7 +16291,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17617,7 +16317,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17644,7 +16343,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17671,7 +16369,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17698,7 +16395,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17725,7 +16421,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17752,7 +16447,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17779,7 +16473,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17806,7 +16499,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17833,7 +16525,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17860,7 +16551,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17887,7 +16577,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17914,7 +16603,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17941,7 +16629,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17968,7 +16655,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17995,7 +16681,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18022,7 +16707,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -18049,7 +16733,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -18076,7 +16759,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -18103,7 +16785,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -18130,7 +16811,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -18157,7 +16837,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -18184,7 +16863,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -18211,7 +16889,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -18238,7 +16915,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -18265,7 +16941,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -18292,7 +16967,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -18319,7 +16993,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18346,7 +17019,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll index c334e70f1f358..2ad7ac9390515 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.add.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vmacc_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define @vmacc_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.add.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - define @vmacc_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define @vmacc_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.add.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vmacc_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define @vmacc_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.add.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vmacc_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define @vmacc_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare 
@llvm.vp.add.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vmacc_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define @vmacc_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.add.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vmacc_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define @vmacc_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.add.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vmacc_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv64i8: ; CHECK: # %bb.0: @@ -630,11 +595,6 @@ define @vmacc_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare @llvm.vp.add.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vmacc_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i16: ; CHECK: # %bb.0: @@ -719,11 +679,6 @@ define @vmacc_vx_nxv1i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vmacc_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i16: ; CHECK: # %bb.0: @@ -808,11 +763,6 @@ define @vmacc_vx_nxv2i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.add.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vmacc_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i16: ; CHECK: # %bb.0: @@ -897,11 
+847,6 @@ define @vmacc_vx_nxv4i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.add.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vmacc_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i16: ; CHECK: # %bb.0: @@ -986,11 +931,6 @@ define @vmacc_vx_nxv8i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.add.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vmacc_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1075,11 +1015,6 @@ define @vmacc_vx_nxv16i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.add.nxv32i16(, , , i32) -declare @llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vmacc_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1167,11 +1102,6 @@ define @vmacc_vx_nxv32i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vmacc_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1256,11 +1186,6 @@ define @vmacc_vx_nxv1i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vmacc_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1345,11 +1270,6 @@ define @vmacc_vx_nxv2i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.add.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare 
@llvm.vp.select.nxv4i32(, , , i32) - define @vmacc_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1434,11 +1354,6 @@ define @vmacc_vx_nxv4i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.add.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vmacc_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1523,11 +1438,6 @@ define @vmacc_vx_nxv8i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.add.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vmacc_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1615,11 +1525,6 @@ define @vmacc_vx_nxv16i32_ta( %a, i32 %b, ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare @llvm.vp.add.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vmacc_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1751,11 +1656,6 @@ define @vmacc_vx_nxv1i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.add.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vmacc_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1887,11 +1787,6 @@ define @vmacc_vx_nxv2i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.add.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vmacc_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2023,11 +1918,6 @@ define @vmacc_vx_nxv4i64_ta( %a, i64 %b, %u } 
-declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.add.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vmacc_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll index b8b4baf53b677..7aade205167b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmacc.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( - , - , - , - iXLen, 
- iXLen); - define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define 
@intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmacc.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll index 31c12db79a946..33a49cefd54d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | 
llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -73,12 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -119,12 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -165,12 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -211,12 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -257,12 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: 
# %bb.0: # %entry @@ -303,12 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -349,12 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -395,12 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -441,12 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64( - , - , - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -464,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -487,12 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -533,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -579,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -602,12 +446,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmadc.carry.in.nxv16i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -625,12 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -648,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -671,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -694,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -717,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -740,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -763,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -786,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -809,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -855,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -901,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32( - , - i32, - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -960,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -996,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1032,12 +774,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll index 4777903558e4c..503c0fd4c232c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i8.nxv4i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i64.nxv1i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i64.nxv2i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i64.nxv4i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i64.nxv8i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i16.i16( - , - i16, - iXLen); - define 
@intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,11 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ 
-784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -836,11 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -868,11 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -900,11 +685,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll index f55c7c0b90b3f..fe5b8b9bf6d52 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.add.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vmadd_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -87,11 +82,6 @@ define 
@vmadd_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.add.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - define @vmadd_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -170,11 +160,6 @@ define @vmadd_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.add.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vmadd_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -253,11 +238,6 @@ define @vmadd_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.add.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vmadd_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -336,11 +316,6 @@ define @vmadd_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare @llvm.vp.add.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vmadd_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -419,11 +394,6 @@ define @vmadd_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.add.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vmadd_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -502,11 +472,6 @@ define @vmadd_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.add.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vmadd_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: vmadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -588,11 +553,6 @@ define @vmadd_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare @llvm.vp.add.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vmadd_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -671,11 +631,6 @@ define @vmadd_vx_nxv1i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vmadd_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -754,11 +709,6 @@ define @vmadd_vx_nxv2i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.add.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vmadd_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -837,11 +787,6 @@ define @vmadd_vx_nxv4i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.add.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vmadd_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -920,11 +865,6 @@ define @vmadd_vx_nxv8i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.add.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vmadd_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1003,11 +943,6 @@ define @vmadd_vx_nxv16i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.add.nxv32i16(, , , i32) -declare 
@llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vmadd_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1089,11 +1024,6 @@ define @vmadd_vx_nxv32i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vmadd_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1172,11 +1102,6 @@ define @vmadd_vx_nxv1i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vmadd_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1255,11 +1180,6 @@ define @vmadd_vx_nxv2i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.add.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare @llvm.vp.select.nxv4i32(, , , i32) - define @vmadd_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1338,11 +1258,6 @@ define @vmadd_vx_nxv4i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.add.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vmadd_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1421,11 +1336,6 @@ define @vmadd_vx_nxv8i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.add.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vmadd_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1507,11 +1417,6 @@ 
define @vmadd_vx_nxv16i32_ta( %a, i32 %b, ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare @llvm.vp.add.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vmadd_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1634,11 +1539,6 @@ define @vmadd_vx_nxv1i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.add.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vmadd_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1761,11 +1661,6 @@ define @vmadd_vx_nxv2i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.add.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vmadd_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1888,11 +1783,6 @@ define @vmadd_vx_nxv4i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.add.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vmadd_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll index 829d082ab7a4f..482642591c91a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, 
i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define 
@intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand.ll b/llvm/test/CodeGen/RISCV/rvv/vmand.ll index 67c89799779f0..c51a7463ac031 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmand.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmand.nxv1i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv2i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv4i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv8i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmand.nxv16i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv32i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv64i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll index 38d71d12660b5..1361f9d67b522 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmandn.nxv1i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv2i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv4i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv8i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv16i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv16i1( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv32i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv64i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll index d81936354f6f3..3cf464247250a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.smax.nxv8i7(, , , i32) - define @vmax_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vmax_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.smax.nxv1i8(, , , i32) - define @vmax_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i8: ; CHECK: # %bb.0: @@ -81,8 +77,6 @@ define @vmax_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv2i8(, , , i32) - define @vmax_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i8: ; CHECK: # %bb.0: @@ -127,8 +121,6 @@ define @vmax_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv3i8(, , , i32) - define @vmax_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv3i8: ; CHECK: # %bb.0: @@ -173,8 +165,6 @@ define @vmax_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv4i8(, , , i32) - define @vmax_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +209,6 @@ define @vmax_vx_nxv4i8_unmasked( 
%va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv8i8(, , , i32) - define @vmax_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i8: ; CHECK: # %bb.0: @@ -265,8 +253,6 @@ define @vmax_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv16i8(, , , i32) - define @vmax_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i8: ; CHECK: # %bb.0: @@ -311,8 +297,6 @@ define @vmax_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smax.nxv32i8(, , , i32) - define @vmax_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv32i8: ; CHECK: # %bb.0: @@ -357,8 +341,6 @@ define @vmax_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smax.nxv64i8(, , , i32) - define @vmax_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv64i8: ; CHECK: # %bb.0: @@ -405,8 +387,6 @@ define @vmax_vx_nxv64i8_unmasked( %va, i8 % ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.smax.nxv128i8(, , , i32) - define @vmax_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv128i8: ; CHECK: # %bb.0: @@ -459,8 +439,6 @@ define @vmax_vx_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv1i16(, , , i32) - define @vmax_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i16: ; CHECK: # %bb.0: @@ -505,8 +483,6 @@ define @vmax_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv2i16(, , , i32) - define @vmax_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i16: ; CHECK: # %bb.0: @@ -551,8 +527,6 @@ define @vmax_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv4i16(, , , i32) - define @vmax_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i16: ; CHECK: # %bb.0: @@ -597,8 +571,6 @@ define @vmax_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv8i16(, , , i32) - define @vmax_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmax_vv_nxv8i16: ; CHECK: # %bb.0: @@ -643,8 +615,6 @@ define @vmax_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv16i16(, , , i32) - define @vmax_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i16: ; CHECK: # %bb.0: @@ -689,8 +659,6 @@ define @vmax_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv32i16(, , , i32) - define @vmax_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv32i16: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define @vmax_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv1i32(, , , i32) - define @vmax_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i32: ; CHECK: # %bb.0: @@ -781,8 +747,6 @@ define @vmax_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv2i32(, , , i32) - define @vmax_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i32: ; CHECK: # %bb.0: @@ -827,8 +791,6 @@ define @vmax_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv4i32(, , , i32) - define @vmax_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i32: ; CHECK: # %bb.0: @@ -873,8 +835,6 @@ define @vmax_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv8i32(, , , i32) - define @vmax_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i32: ; CHECK: # %bb.0: @@ -919,8 +879,6 @@ define @vmax_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv16i32(, , , i32) - define @vmax_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i32: ; CHECK: # %bb.0: @@ -967,8 +925,6 @@ define @vmax_vx_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.smax.nxv32i32(, , , i32) - define @vmax_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1024,8 +980,6 @@ define @vmax_vx_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). -declare i32 @llvm.vscale.i32() - define @vmax_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmax_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1091,8 +1045,6 @@ define @vmax_vx_nxv32i32_evl_nx16( %va, i ret %v } -declare @llvm.vp.smax.nxv1i64(, , , i32) - define @vmax_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1165,8 +1117,6 @@ define @vmax_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv2i64(, , , i32) - define @vmax_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1239,8 +1189,6 @@ define @vmax_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv4i64(, , , i32) - define @vmax_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1313,8 +1261,6 @@ define @vmax_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv8i64(, , , i32) - define @vmax_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax.ll b/llvm/test/CodeGen/RISCV/rvv/vmax.ll index 7b22649e26425..e90ccc7f21291 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmax.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmax.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i8.nxv16i8( - 
, - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( - , - 
, - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmax.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; 
CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # 
%bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmax.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - 
define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); 
- define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define 
@intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll index 7603bcef1973e..e755d099df4a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.umax.nxv8i7(, , , i32) - define @vmaxu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vmaxu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.umax.nxv1i8(, , , i32) - define @vmaxu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -80,8 +76,6 @@ define @vmaxu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv2i8(, , , i32) - define @vmaxu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -126,8 +120,6 @@ define @vmaxu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv3i8(, , , i32) - define @vmaxu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -172,8 +164,6 @@ define @vmaxu_vx_nxv3i8_unmasked( %va, i8 %b, 
ret %v } -declare @llvm.vp.umax.nxv4i8(, , , i32) - define @vmaxu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -218,8 +208,6 @@ define @vmaxu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv8i8(, , , i32) - define @vmaxu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -264,8 +252,6 @@ define @vmaxu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv16i8(, , , i32) - define @vmaxu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define @vmaxu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umax.nxv32i8(, , , i32) - define @vmaxu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -356,8 +340,6 @@ define @vmaxu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umax.nxv64i8(, , , i32) - define @vmaxu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -404,8 +386,6 @@ define @vmaxu_vx_nxv64i8_unmasked( %va, i8 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.umax.nxv128i8(, , , i32) - define @vmaxu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv128i8: ; CHECK: # %bb.0: @@ -458,8 +438,6 @@ define @vmaxu_vx_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv1i16(, , , i32) - define @vmaxu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -504,8 +482,6 @@ define @vmaxu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv2i16(, , , i32) - define @vmaxu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -550,8 +526,6 @@ define @vmaxu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv4i16(, , , i32) - define @vmaxu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -596,8 +570,6 @@ define @vmaxu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv8i16(, , , i32) - define @vmaxu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -642,8 +614,6 @@ define @vmaxu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv16i16(, , , i32) - define @vmaxu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -688,8 +658,6 @@ define @vmaxu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv32i16(, , , i32) - define @vmaxu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -734,8 +702,6 @@ define @vmaxu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv1i32(, , , i32) - define @vmaxu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -780,8 +746,6 @@ define @vmaxu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv2i32(, , , i32) - define @vmaxu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -826,8 +790,6 @@ define 
@vmaxu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv4i32(, , , i32) - define @vmaxu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -872,8 +834,6 @@ define @vmaxu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv8i32(, , , i32) - define @vmaxu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -918,8 +878,6 @@ define @vmaxu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv16i32(, , , i32) - define @vmaxu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vmaxu_vx_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.umax.nxv32i32(, , , i32) - define @vmaxu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1023,8 +979,6 @@ define @vmaxu_vx_nxv32i32_unmasked( %va, ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vmaxu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1090,8 +1044,6 @@ define @vmaxu_vx_nxv32i32_evl_nx16( %va, ret %v } -declare @llvm.vp.umax.nxv1i64(, , , i32) - define @vmaxu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1164,8 +1116,6 @@ define @vmaxu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv2i64(, , , i32) - define @vmaxu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1238,8 +1188,6 @@ define @vmaxu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv4i64(, , , i32) - define @vmaxu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1312,8 +1260,6 @@ define @vmaxu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv8i64(, , , i32) - define @vmaxu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll index 377c182cab21c..96e7e3df34955 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmaxu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( 
- , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 
+384,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); 
- define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll index c00fc445fc5a3..fff20306d17b7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll @@ -4,9 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmclr.nxv1i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -20,9 +17,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv2i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -36,9 +30,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv4i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -52,9 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv8i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -68,9 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv16i1( - 
iXLen); - define @intrinsic_vmclr_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -84,9 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv32i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -100,9 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv64i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll index 3fb5aa02230b4..4a411475e337a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck -check-prefixes=CHECK,RV64 %s -declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # 
%entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -999,13 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1034,13 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1069,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1478,13 +1170,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1187,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f16.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1526,13 +1204,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmerge.nxv4f16.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1550,13 +1221,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f16.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1574,13 +1238,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f16.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1598,13 +1255,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32f16.nxv32f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1622,13 +1272,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1289,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1670,13 +1306,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # 
%entry @@ -1694,13 +1323,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1718,13 +1340,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1742,13 +1357,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1766,13 +1374,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f32.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1790,13 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1814,13 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f32.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1838,13 +1425,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f32.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1862,13 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f32.nxv16f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1886,13 +1459,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f64.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1910,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f64.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1934,13 +1493,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f64.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1510,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f64.nxv8f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll index 9bd859b3452f2..668af55427891 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfeq.nxv1bf16( - , - , - iXLen); - define 
@intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # 
%entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( - , - , - 
bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll index babf8de57b7ea..c306ae258cd33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfeq.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmfeq.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfeq_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f16.f16( - , - half, - iXLen); - 
define @intrinsic_vmfeq_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16: ; CHECK: # 
%bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define 
@intrinsic_vmfeq_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64: 
; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll index 73946dc1a744c..d1d53893ef407 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfge.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmfge.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2bf16.bf16( - , - bfloat, - iXLen); - define 
@intrinsic_vmfge_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll index 4a9dd2f7d769d..bb2ec4da79f89 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfge.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmfge.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f64( - , - , - , - , - iXLen); - define 
@intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 
+557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f32.f32( - , - float, - iXLen); - define 
@intrinsic_vmfge_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfge_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll index fac324ca5c125..384087126bee2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfgt.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16: 
; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16bf16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll index c9c5e84937cec..ec05587161e2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfgt.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f16_nxv2f16: ; CHECK: # 
%bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f32_nxv1f32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfgt.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfgt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16f16.f16( - , - , - 
half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfgt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll index 8356b7bbd3ff7..da7daba71b1ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfle.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8bf16( 
- , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll index 77d8dda258961..95980bd18e271 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll @@ -4,11 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfle.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfle_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfle.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfle.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll index 2e1bcc5e87bfc..1e992edcfd45a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmflt.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2bf16( - , - , - , - 
, - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmflt_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry 
@@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll index 0fdae8abe8f6b..24532977116af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmflt.nxv1f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f16( - , - , - 
iXLen); - define @intrinsic_vmflt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # 
%entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f16.f16( - , - , - half, - 
, - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f32_f32: ; 
CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f64.f64( - , - double, 
- iXLen); - define @intrinsic_vmflt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmflt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmflt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll index 283ffc500fdde..90707b4b57b5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfne.nxv1bf16( - , - , - iXLen); - define 
@intrinsic_vmfne_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # 
%entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4bf16.bf16( - , - , - 
bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll index 1d0227f793728..aa09ca123bd6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfne.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmfne.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfne_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f16.f16( - , - half, - iXLen); - 
define @intrinsic_vmfne_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16: ; CHECK: # 
%bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define 
@intrinsic_vmfne_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64: 
; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll index 3922b09f1f02d..961f63cbfbc95 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.smin.nxv8i7(, , , i32) - define @vmin_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vmin_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.smin.nxv1i8(, , , i32) - define @vmin_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i8: ; CHECK: # %bb.0: @@ -81,8 +77,6 @@ define @vmin_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv2i8(, , , i32) - define @vmin_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i8: ; CHECK: # %bb.0: @@ -127,8 +121,6 @@ define @vmin_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv3i8(, , , i32) - define @vmin_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv3i8: ; CHECK: # %bb.0: @@ -173,8 +165,6 @@ define @vmin_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv4i8(, , , i32) - define @vmin_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +209,6 @@ define @vmin_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv8i8(, , , i32) - define @vmin_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i8: ; CHECK: # %bb.0: @@ -265,8 +253,6 @@ define @vmin_vx_nxv8i8_unmasked( %va, i8 
%b, ret %v } -declare @llvm.vp.smin.nxv16i8(, , , i32) - define @vmin_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv16i8: ; CHECK: # %bb.0: @@ -311,8 +297,6 @@ define @vmin_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smin.nxv32i8(, , , i32) - define @vmin_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv32i8: ; CHECK: # %bb.0: @@ -357,8 +341,6 @@ define @vmin_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smin.nxv64i8(, , , i32) - define @vmin_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv64i8: ; CHECK: # %bb.0: @@ -405,8 +387,6 @@ define @vmin_vx_nxv64i8_unmasked( %va, i8 % ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.smin.nxv128i8(, , , i32) - define @vmin_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv128i8: ; CHECK: # %bb.0: @@ -459,8 +439,6 @@ define @vmin_vx_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv1i16(, , , i32) - define @vmin_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i16: ; CHECK: # %bb.0: @@ -505,8 +483,6 @@ define @vmin_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv2i16(, , , i32) - define @vmin_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i16: ; CHECK: # %bb.0: @@ -551,8 +527,6 @@ define @vmin_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv4i16(, , , i32) - define @vmin_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i16: ; CHECK: # %bb.0: @@ -597,8 +571,6 @@ define @vmin_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv8i16(, , , i32) - define @vmin_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i16: ; CHECK: # %bb.0: @@ -643,8 +615,6 @@ define @vmin_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv16i16(, , , i32) - define @vmin_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vmin_vv_nxv16i16: ; CHECK: # %bb.0: @@ -689,8 +659,6 @@ define @vmin_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv32i16(, , , i32) - define @vmin_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv32i16: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define @vmin_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv1i32(, , , i32) - define @vmin_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i32: ; CHECK: # %bb.0: @@ -781,8 +747,6 @@ define @vmin_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv2i32(, , , i32) - define @vmin_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i32: ; CHECK: # %bb.0: @@ -827,8 +791,6 @@ define @vmin_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv4i32(, , , i32) - define @vmin_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i32: ; CHECK: # %bb.0: @@ -873,8 +835,6 @@ define @vmin_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv8i32(, , , i32) - define @vmin_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i32: ; CHECK: # %bb.0: @@ -919,8 +879,6 @@ define @vmin_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv16i32(, , , i32) - define @vmin_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv16i32: ; CHECK: # %bb.0: @@ -967,8 +925,6 @@ define @vmin_vx_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.smin.nxv32i32(, , , i32) - define @vmin_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1024,8 +980,6 @@ define @vmin_vx_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vmin_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmin_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1091,8 +1045,6 @@ define @vmin_vx_nxv32i32_evl_nx16( %va, i ret %v } -declare @llvm.vp.smin.nxv1i64(, , , i32) - define @vmin_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1165,8 +1117,6 @@ define @vmin_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv2i64(, , , i32) - define @vmin_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1239,8 +1189,6 @@ define @vmin_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv4i64(, , , i32) - define @vmin_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1313,8 +1261,6 @@ define @vmin_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv8i64(, , , i32) - define @vmin_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin.ll b/llvm/test/CodeGen/RISCV/rvv/vmin.ll index 17ad2442bb695..edd643f08ee43 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmin.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmin.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 
@@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmin.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmin.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll index 59af953fd52d3..631799d24e14c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.umin.nxv8i7(, , , i32) - define @vminu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vminu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.umin.nxv1i8(, , , i32) - define @vminu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -80,8 +76,6 @@ define @vminu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv2i8(, , , i32) - define @vminu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -126,8 +120,6 @@ define @vminu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv3i8(, , , i32) - define @vminu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -172,8 +164,6 @@ define @vminu_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv4i8(, , , i32) - define @vminu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -218,8 +208,6 @@ define 
@vminu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv8i8(, , , i32) - define @vminu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -264,8 +252,6 @@ define @vminu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv16i8(, , , i32) - define @vminu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define @vminu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umin.nxv32i8(, , , i32) - define @vminu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -356,8 +340,6 @@ define @vminu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umin.nxv64i8(, , , i32) - define @vminu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -404,8 +386,6 @@ define @vminu_vx_nxv64i8_unmasked( %va, i8 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.umin.nxv128i8(, , , i32) - define @vminu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv128i8: ; CHECK: # %bb.0: @@ -458,8 +438,6 @@ define @vminu_vx_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv1i16(, , , i32) - define @vminu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -504,8 +482,6 @@ define @vminu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv2i16(, , , i32) - define @vminu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -550,8 +526,6 @@ define @vminu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv4i16(, , , i32) - define @vminu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -596,8 +570,6 @@ define @vminu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv8i16(, , , i32) - define @vminu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -642,8 +614,6 @@ define @vminu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv16i16(, , , i32) - define @vminu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -688,8 +658,6 @@ define @vminu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv32i16(, , , i32) - define @vminu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -734,8 +702,6 @@ define @vminu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv1i32(, , , i32) - define @vminu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -780,8 +746,6 @@ define @vminu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv2i32(, , , i32) - define @vminu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -826,8 +790,6 @@ define 
@vminu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv4i32(, , , i32) - define @vminu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -872,8 +834,6 @@ define @vminu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv8i32(, , , i32) - define @vminu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -918,8 +878,6 @@ define @vminu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv16i32(, , , i32) - define @vminu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vminu_vx_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.umin.nxv32i32(, , , i32) - define @vminu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1023,8 +979,6 @@ define @vminu_vx_nxv32i32_unmasked( %va, ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vminu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vminu_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1090,8 +1044,6 @@ define @vminu_vx_nxv32i32_evl_nx16( %va, ret %v } -declare @llvm.vp.umin.nxv1i64(, , , i32) - define @vminu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1164,8 +1116,6 @@ define @vminu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv2i64(, , , i32) - define @vminu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1238,8 +1188,6 @@ define @vminu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv4i64(, , , i32) - define @vminu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1312,8 +1260,6 @@ define @vminu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv8i64(, , , i32) - define @vminu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu.ll b/llvm/test/CodeGen/RISCV/rvv/vminu.ll index ba86de4adb0bb..251f833d75faa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vminu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vminu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( 
- , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } 
-declare @llvm.riscv.vminu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 
+384,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); 
- define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll index 3406aebc4f8a8..4545f7009413b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmnand.nxv1i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv2i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv4i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv8i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmnand.nxv16i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv32i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv64i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll index afd85767004df..1d19c7398220f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmnor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv16i1( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor.ll b/llvm/test/CodeGen/RISCV/rvv/vmor.ll index bfd873186e83f..d42c46f785961 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll index ebc5c3a23c35a..8018cfeab9239 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmorn.nxv1i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv2i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv4i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv8i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv16i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv32i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv64i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll index 9ce7d68ba4012..b052d6725822b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -73,12 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( - , - , - , - iXLen); - define 
@intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -119,12 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -165,12 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -211,12 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -257,12 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -303,12 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -349,12 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -395,12 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( - , - , - , - iXLen); - 
define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -441,12 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -464,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -487,12 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -533,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -556,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -579,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -602,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -625,12 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -648,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -671,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -694,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -717,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -740,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -763,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -786,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -809,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -855,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( - , - 
i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -901,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -960,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -996,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1032,12 +774,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: ; RV32: # 
%bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll index 8c870a9332646..465639d366a93 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8( - 
, - , - iXLen); - define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i32.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i64.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i64.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i64.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsbc.nxv8i64.nxv8i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv64i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,11 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -836,11 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -868,11 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -900,11 +685,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll index 80e74faa8cd91..1f6c8ef61416c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsbf.nxv1i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv2i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv4i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv8i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv8i1( - , - , - , - iXLen); - define 
@intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv16i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv32i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv64i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll index 6407f39a65e8b..c87010b144696 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare 
@llvm.riscv.vmseq.nxv1i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 
@@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv32i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i32( - , - , - , - , - iXLen); - define 
@intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmseq.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmseq.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i64.i64( - , - , 
- i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset.ll b/llvm/test/CodeGen/RISCV/rvv/vmset.ll index 0c63d7a852143..e2853310e213c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmset.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmset.ll @@ -4,9 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmset.nxv1i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -20,9 +17,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv2i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -36,9 +30,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv4i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -52,9 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv8i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -68,9 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv16i1( - iXLen); - define 
@intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -84,9 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv32i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -100,9 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv64i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll index 45e3840f7e673..21a4143f323ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsge.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i8( - , - , - 
iXLen); - define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i32( - , - , - , - , - iXLen); - define 
@intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmsge.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -943,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -970,11 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1018,11 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1039,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1104,12 +852,6 @@ entry: ret %a } - -declare @llvm.riscv.vmsge.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: ; CHECK: # %bb.0: 
# %entry @@ -1126,13 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1153,11 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1174,13 +904,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1201,11 +924,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1222,13 +940,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1249,11 +960,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1270,13 +976,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1297,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1032,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1366,13 +1048,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,11 +1068,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1414,13 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1441,11 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1489,11 +1140,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmsge.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1510,13 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1537,11 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1558,13 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1585,11 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1606,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1633,11 +1248,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1654,13 +1264,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1681,11 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1714,13 +1312,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1756,11 +1347,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1789,13 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1831,11 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1864,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll index d3f57d58c7ab7..4795e86983089 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck 
%s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgeu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i16( - , - , - , - , - iXLen); - 
define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -943,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -970,11 +742,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsgeu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1018,11 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1039,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,11 +814,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1114,11 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: 
; CHECK: # %bb.0: # %entry @@ -1162,11 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +902,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,11 +922,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1231,13 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1258,11 +958,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1279,13 +974,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1306,11 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1327,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define 
@intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1354,11 +1030,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1046,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1402,11 +1066,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1082,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1450,11 +1102,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1118,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1498,11 +1138,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: ; 
CHECK: # %bb.0: # %entry @@ -1519,13 +1154,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1546,11 +1174,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1594,11 +1210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1226,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1642,11 +1246,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1675,13 +1274,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1717,11 +1309,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i64.i64( - , - i64, 
- iXLen); - define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1750,13 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1792,11 +1372,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1825,13 +1400,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll index 62ac44bfdf38c..f7b5cad0c8ed9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgt.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv32i8( - , - , - iXLen); - define 
@intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i64( - , - , - , - , - iXLen); - define 
@intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsgt.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: ; CHECK: 
# %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i16.i16( - , - i16, - iXLen); - define 
@intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define 
@intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll index d57b9cd5bae53..2f79b1ca2b93e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgtu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i16( - , - , - , - , - iXLen); - define 
@intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ 
-534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmsgtu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8: 
; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define 
@intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: ; 
CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i64.i64( - , - 
i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll index 9c70dcab1efde..00a3673b0f415 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsif.nxv1i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmsif_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv2i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv4i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv8i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv8i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv16i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsif.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv32i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv64i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll index 9653dfd2518d8..ec60f75bb206d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsle.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i8.i8( - , - i8, - iXLen); - define 
@intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsle.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i32.i32( - , - , - 
i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: ; RV32: # 
%bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll index 25ecfa65c7c48..1c57fc9002857 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsleu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i8( - , - , - , - , - iXLen); 
- define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 
@@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsleu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define 
@intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 
+966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i32.i32( - , - i32, - iXLen); - define 
@intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll index c17495e3b2119..e528d07dac51f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck 
%s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmslt.nxv1i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv32i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i32( - , - 
, - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv32i8.i8( - , - i8, - iXLen); - define 
@intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmslt.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll index a37a02848365d..8a909d0c03715 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsltu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv32i8( - 
, - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i32( - , - , - iXLen); - define 
@intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry 
@@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i16.i16( - , - , - i16, - , - 
iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsltu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsltu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind 
{ ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll index ed41a18dcc8d3..7e27d98cf9161 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsne.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsne.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i32( - , - 
, - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 
+721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i16.i16( - , - i16, - 
iXLen); - define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i64.i64( - , - , - i64, - 
, - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll index 4b818a2b1e58f..4620ac5cc5c14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsof.nxv1i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # 
%entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv2i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv4i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv8i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv8i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv16i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv32i1( - , - iXLen); - 
define @intrinsic_vmsof_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv64i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll index 419b55124a501..517808ee98d60 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.mul.nxv2i1(, , , i32) - define @vmul_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vmul_vv_nxv2i1( %va, %v } -declare @llvm.vp.mul.nxv4i1(, , , i32) - define @vmul_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vmul_vv_nxv4i1( %va, %v } -declare @llvm.vp.mul.nxv8i1(, , , i32) - define @vmul_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vmul_vv_nxv8i1( %va, %v } -declare @llvm.vp.mul.nxv16i1(, , , i32) - define @vmul_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i1: ; CHECK: # 
%bb.0: @@ -53,8 +44,6 @@ define @vmul_vv_nxv16i1( %va, %v } -declare @llvm.vp.mul.nxv32i1(, , , i32) - define @vmul_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll index 14a236e071551..8d690bc71a2ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv8i7(, , , i32) - define @vmul_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vmul_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.mul.nxv1i8(, , , i32) - define @vmul_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vmul_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv2i8(, , , i32) - define @vmul_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i8: ; CHECK: # %bb.0: @@ -110,8 +104,6 @@ define @vmul_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv4i8(, , , i32) - define @vmul_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i8: ; CHECK: # %bb.0: @@ -156,8 +148,6 @@ define @vmul_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv8i8(, , , i32) - define @vmul_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i8: ; CHECK: # %bb.0: @@ -202,8 +192,6 @@ define @vmul_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv16i8(, , , i32) - define @vmul_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i8: ; CHECK: # %bb.0: @@ -248,8 +236,6 @@ define @vmul_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv32i8(, , , i32) - define @vmul_vv_nxv32i8( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vmul_vv_nxv32i8: ; CHECK: # %bb.0: @@ -294,8 +280,6 @@ define @vmul_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv64i8(, , , i32) - define @vmul_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv64i8: ; CHECK: # %bb.0: @@ -340,8 +324,6 @@ define @vmul_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv1i16(, , , i32) - define @vmul_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i16: ; CHECK: # %bb.0: @@ -386,8 +368,6 @@ define @vmul_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv2i16(, , , i32) - define @vmul_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i16: ; CHECK: # %bb.0: @@ -432,8 +412,6 @@ define @vmul_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv4i16(, , , i32) - define @vmul_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i16: ; CHECK: # %bb.0: @@ -478,8 +456,6 @@ define @vmul_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv8i16(, , , i32) - define @vmul_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i16: ; CHECK: # %bb.0: @@ -524,8 +500,6 @@ define @vmul_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv16i16(, , , i32) - define @vmul_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i16: ; CHECK: # %bb.0: @@ -570,8 +544,6 @@ define @vmul_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv32i16(, , , i32) - define @vmul_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i16: ; CHECK: # %bb.0: @@ -616,8 +588,6 @@ define @vmul_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv1i32(, , , i32) - define @vmul_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i32: ; CHECK: # %bb.0: @@ -662,8 +632,6 @@ define @vmul_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv2i32(, , , i32) - define @vmul_vv_nxv2i32( %va, %b, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i32: ; CHECK: # %bb.0: @@ -708,8 +676,6 @@ define @vmul_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv4i32(, , , i32) - define @vmul_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i32: ; CHECK: # %bb.0: @@ -754,8 +720,6 @@ define @vmul_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv7i32(, , , i32) - define @vmul_vv_nxv7i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv7i32: ; CHECK: # %bb.0: @@ -800,8 +764,6 @@ define @vmul_vx_nxv7i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv8i32(, , , i32) - define @vmul_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i32: ; CHECK: # %bb.0: @@ -846,8 +808,6 @@ define @vmul_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv16i32(, , , i32) - define @vmul_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i32: ; CHECK: # %bb.0: @@ -904,8 +864,6 @@ define @vmul_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv1i64(, , , i32) - define @vmul_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i64: ; CHECK: # %bb.0: @@ -978,8 +936,6 @@ define @vmul_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv2i64(, , , i32) - define @vmul_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1052,8 +1008,6 @@ define @vmul_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv4i64(, , , i32) - define @vmul_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1126,8 +1080,6 @@ define @vmul_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv8i64(, , , i32) - define @vmul_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i64: ; CHECK: # %bb.0: @@ -1350,8 +1302,6 @@ define @vmul_vx_negpow2_nxv8i64_unmasked( % ret %v } -declare @llvm.vp.shl.nxv8i64(, , , i32) - define 
@vmul_vshl_vx_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vshl_vx_nxv8i64: ; CHECK: # %bb.0: @@ -1416,8 +1366,6 @@ define @vmul_vshl_vv_nxv8i64_unmasked( %va, ret %v } -declare @llvm.vp.add.nxv8i64(, , , i32) - define @vmul_vadd_vx_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vadd_vx_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul.ll b/llvm/test/CodeGen/RISCV/rvv/vmul.ll index 913232f7aedfc..90b44c6e6800c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul.ll @@ -8,12 +8,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64d \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -30,13 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +57,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -122,13 +90,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -146,12 +107,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,13 +156,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -238,12 +173,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -260,13 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -284,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -306,13 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -331,12 +240,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -353,13 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -377,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -399,13 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -423,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -445,13 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -469,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -491,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -515,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -537,13 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -561,12 +405,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -583,13 +421,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -608,12 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -630,13 +455,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -654,12 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -676,13 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -700,12 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -722,13 +521,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -746,12 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -768,13 +554,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -792,12 +571,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -814,13 +587,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -839,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -861,13 +621,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -885,12 +638,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -907,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -931,12 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -953,13 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -977,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -999,13 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1024,12 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1046,13 +754,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1070,12 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1092,13 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1116,12 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1138,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1162,12 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1184,13 +853,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,13 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1254,12 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1276,13 +919,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1300,12 +936,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1322,13 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1368,13 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,12 +1002,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1414,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1460,13 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1484,12 +1068,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1530,12 +1101,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1552,13 +1117,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1576,12 +1134,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1598,13 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1622,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1183,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1668,12 +1200,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1690,13 +1216,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1714,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,13 +1249,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1760,12 +1266,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1782,13 +1282,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1806,12 +1299,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1828,13 +1315,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1852,12 +1332,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1886,13 +1360,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,12 +1389,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1956,13 +1417,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1992,12 +1446,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2026,13 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2062,12 +1503,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2096,13 +1531,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll index 12d83ba58898f..bd2eac51207c1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll @@ -10,12 +10,6 @@ ; 
ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulh -declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry 
@@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # 
%entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, 
i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( %0, i32 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define 
@intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll index 5a785d8a678b1..90ec0a6766e24 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll @@ -10,12 +10,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhsu -declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( - , - , - , - iXLen); - 
define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define 
@intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define 
@intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 
+1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll index 24b47da8d0b4b..8c0d7ffb5084c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll @@ -10,12 +10,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhu -declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ 
-194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define 
@intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 
+822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmulhu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmulhu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, 
%3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll index 4629db26ca034..4afe4c360be31 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmv.s.x.nxv1i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmv.s.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv32i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv64i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i16(, i16, iXLen); - define 
@intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv32i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -199,8 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -212,8 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -225,8 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -238,8 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -264,8 
+226,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv2i64: ; RV32: # %bb.0: # %entry @@ -290,8 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv4i64: ; RV32: # %bb.0: # %entry @@ -316,8 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll index 784b807a6a2e5..bfb44e0944d59 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmv.v.v.nxv1i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv64i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmv.v.v.nxv32i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmv.v.v.nxv4i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmv.v.v.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -724,11 
+544,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,11 +619,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -844,11 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll index 472cd6ec07e23..79f4bb72c01aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' 
%s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmv.v.x.nxv1i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv64i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i16( - , - i16, - 
iXLen); - define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32: ; 
CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -395,11 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv2i64: ; RV32: # %bb.0: # %entry @@ -426,11 +326,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv4i64: ; RV32: # %bb.0: # %entry @@ -457,11 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll index 0ec9439e04a08..df3cbe101658d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.riscv.vmv.x.s.nxv1i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv2i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret i8 %a } 
-declare i8 @llvm.riscv.vmv.x.s.nxv4i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv8i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv16i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv32i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv64i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret i8 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv1i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv2i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv4i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv8i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv16i16() - 
define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv32i16( ) - define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret i16 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv2i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -199,8 +169,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv4i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -212,8 +180,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv8i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -225,8 +191,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv16i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -238,8 +202,6 @@ entry: ret i32 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv1i64( ) - define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; RV32: # %bb.0: # %entry @@ -260,8 +222,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv2i64( ) - define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; RV32: # %bb.0: # %entry @@ -282,8 +242,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv4i64( ) - define i64 
@intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; RV32: # %bb.0: # %entry @@ -304,8 +262,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv8i64() - define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll index fc1bb4feedc4a..9107a08cb1a05 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmxnor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry 
@@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll index dc75fc3e7cd38..3c6a4aabcaf95 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmxor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv64i1( - , - , - iXLen); - define 
@intrinsic_vmxor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll index 1c389f522e844..05bd6b9123b5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +145,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,13 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +181,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,13 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,13 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,13 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +287,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,13 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +323,6 @@ entry: ret %a } 
-declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,13 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,13 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,11 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -755,13 +555,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -780,11 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -802,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -827,11 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -849,13 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -874,11 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -897,13 +661,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # 
%entry @@ -922,11 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -945,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,11 +715,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1018,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1040,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1065,11 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1112,11 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1135,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1160,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1183,13 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1208,11 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1256,11 +929,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,13 +946,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1303,11 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1326,13 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1351,11 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1374,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1399,11 +1036,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.nxv8i32.nxv8i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1054,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll index 21dc859c3bf23..0e7682cc9411a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +145,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,13 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +181,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,13 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,13 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,13 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +287,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,13 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +323,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,13 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-561,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,13 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,11 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -755,13 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -780,11 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -802,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -827,11 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -849,13 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -874,11 +643,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclipu.nxv8i8.nxv8i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -897,13 +661,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,11 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -945,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,11 +715,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1018,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -1040,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1065,11 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1112,11 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1135,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1160,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1183,13 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1208,11 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1256,11 +929,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,13 +946,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1303,11 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1326,13 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1351,11 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1374,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1399,11 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1054,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll index 3484d288088a0..1c4294990f90a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.sub.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vnmsac_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define @vnmsac_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.sub.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - 
define @vnmsac_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define @vnmsac_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.sub.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vnmsac_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define @vnmsac_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.sub.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vnmsac_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define @vnmsac_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare @llvm.vp.sub.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vnmsac_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define @vnmsac_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.sub.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vnmsac_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define @vnmsac_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.sub.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vnmsac_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv64i8: ; CHECK: # %bb.0: @@ -630,11 +595,6 @@ define @vnmsac_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare 
@llvm.vp.sub.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vnmsac_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i16: ; CHECK: # %bb.0: @@ -719,11 +679,6 @@ define @vnmsac_vx_nxv1i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.sub.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vnmsac_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i16: ; CHECK: # %bb.0: @@ -808,11 +763,6 @@ define @vnmsac_vx_nxv2i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.sub.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vnmsac_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i16: ; CHECK: # %bb.0: @@ -897,11 +847,6 @@ define @vnmsac_vx_nxv4i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.sub.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vnmsac_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i16: ; CHECK: # %bb.0: @@ -986,11 +931,6 @@ define @vnmsac_vx_nxv8i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.sub.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vnmsac_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1075,11 +1015,6 @@ define @vnmsac_vx_nxv16i16_ta( %a, i16 %b ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.sub.nxv32i16(, , , i32) -declare @llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vnmsac_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vnmsac_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1167,11 +1102,6 @@ define @vnmsac_vx_nxv32i16_ta( %a, i16 %b ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.sub.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vnmsac_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1256,11 +1186,6 @@ define @vnmsac_vx_nxv1i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.sub.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vnmsac_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1345,11 +1270,6 @@ define @vnmsac_vx_nxv2i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.sub.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare @llvm.vp.select.nxv4i32(, , , i32) - define @vnmsac_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1434,11 +1354,6 @@ define @vnmsac_vx_nxv4i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.sub.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vnmsac_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1523,11 +1438,6 @@ define @vnmsac_vx_nxv8i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.sub.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vnmsac_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1615,11 +1525,6 @@ define @vnmsac_vx_nxv16i32_ta( %a, i32 %b ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare 
@llvm.vp.sub.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vnmsac_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1751,11 +1656,6 @@ define @vnmsac_vx_nxv1i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.sub.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vnmsac_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1887,11 +1787,6 @@ define @vnmsac_vx_nxv2i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.sub.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vnmsac_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2023,11 +1918,6 @@ define @vnmsac_vx_nxv4i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.sub.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vnmsac_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll index 505443d93720b..767caf94f16f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ 
-192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, 
iXLen); - define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ 
-662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - 
define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } 
-declare @llvm.riscv.vnmsac.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: ; RV32: # 
%bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll index d9c7560830fec..8e858f1143d43 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnmsub.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsub.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsub.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll index cb7a020d0b964..9b35e3e62aa87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll @@ -2,10 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.trunc.nxv1i16.nxv1i32(, , i32) -declare @llvm.vp.ashr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i16( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -18,7 +14,6 @@ define @vsra_vv_nxv1i16( %a, %vr } - define @vsra_vv_nxv1i16_unmasked( %a, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: @@ -31,10 +26,6 @@ define @vsra_vv_nxv1i16_unmasked( %a, %vr } -declare @llvm.vp.sext.nxv1i64.nxv1i32(, , i32) -declare @llvm.vp.trunc.nxv1i32.nxv1i64(, , i32) -declare @llvm.vp.ashr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra.ll index 78c31d3403471..ac94aefc91c2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsra.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: 
# %bb.0: # %entry @@ -168,14 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,14 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -241,12 +171,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -264,14 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -289,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -311,14 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -336,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -358,14 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -383,12 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -406,14 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -431,12 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -502,14 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -597,14 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -622,12 +440,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -645,14 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -670,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -693,14 +491,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -718,12 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -740,14 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -765,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -787,14 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -812,12 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -834,14 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -859,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -882,14 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -907,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -930,14 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -955,12 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -978,14 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1003,12 +709,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1025,14 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1050,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1072,14 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1097,12 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1120,14 +792,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1145,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1168,14 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1193,12 +843,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1216,14 +860,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1241,12 +877,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1263,14 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1288,12 +910,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( %0, 
iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1311,14 +927,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1336,12 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1359,14 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1384,12 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1407,14 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll index e6e86011745b4..bffd30df68353 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll @@ -2,10 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare 
@llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.trunc.nxv1i16.nxv1i32(, , i32) -declare @llvm.vp.lshr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i16( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -18,7 +14,6 @@ define @vsra_vv_nxv1i16( %a, %vr } - define @vsra_vv_nxv1i16_unmasked( %a, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: @@ -31,10 +26,6 @@ define @vsra_vv_nxv1i16_unmasked( %a, %vr } -declare @llvm.vp.sext.nxv1i64.nxv1i32(, , i32) -declare @llvm.vp.trunc.nxv1i32.nxv1i64(, , i32) -declare @llvm.vp.lshr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll index d5586d333a554..4e4dfc4de7eb7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,14 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,14 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -241,12 +171,6 @@ entry: ret 
%a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -264,14 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -289,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -311,14 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -336,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -358,14 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -383,12 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -406,14 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -431,12 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -502,14 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - 
iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -597,14 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -622,12 +440,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -645,14 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -670,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -693,14 +491,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -718,12 
+508,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -740,14 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -765,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -787,14 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -812,12 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -834,14 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -859,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -882,14 +624,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -907,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -930,14 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -955,12 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -978,14 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1003,12 +709,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1025,14 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1050,12 +742,6 @@ 
entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1072,14 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1097,12 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1120,14 +792,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1145,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1168,14 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1193,12 +843,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1216,14 +860,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1241,12 +877,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1263,14 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1288,12 +910,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1311,14 +927,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1336,12 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1359,14 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1407,14 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll index e864d71fdad11..f13a85d29b099 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.or.nxv8i7(, , , i32) - define @vor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vor_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.or.nxv1i8(, , , i32) - define @vor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vor_vi_nxv1i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv2i8(, , , i32) - define @vor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +144,6 @@ define @vor_vi_nxv2i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv4i8(, , , i32) - define @vor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i8: ; CHECK: # %bb.0: @@ -216,8 +208,6 @@ define @vor_vi_nxv4i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv8i8(, , , i32) - define @vor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i8: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define 
@vor_vi_nxv8i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv16i8(, , , i32) - define @vor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i8: ; CHECK: # %bb.0: @@ -348,8 +336,6 @@ define @vor_vi_nxv16i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv32i8(, , , i32) - define @vor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i8: ; CHECK: # %bb.0: @@ -414,8 +400,6 @@ define @vor_vi_nxv32i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv64i8(, , , i32) - define @vor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv64i8: ; CHECK: # %bb.0: @@ -480,8 +464,6 @@ define @vor_vi_nxv64i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv1i16(, , , i32) - define @vor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i16: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define @vor_vi_nxv1i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i16(, , , i32) - define @vor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i16: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vor_vi_nxv2i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i16(, , , i32) - define @vor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i16: ; CHECK: # %bb.0: @@ -678,8 +656,6 @@ define @vor_vi_nxv4i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i16(, , , i32) - define @vor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i16: ; CHECK: # %bb.0: @@ -744,8 +720,6 @@ define @vor_vi_nxv8i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv16i16(, , , i32) - define @vor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i16: ; CHECK: # %bb.0: @@ -810,8 +784,6 @@ define @vor_vi_nxv16i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv32i16(, , , i32) - define @vor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i16: ; CHECK: # %bb.0: @@ -876,8 +848,6 @@ define 
@vor_vi_nxv32i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv1i32(, , , i32) - define @vor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i32: ; CHECK: # %bb.0: @@ -942,8 +912,6 @@ define @vor_vi_nxv1i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i32(, , , i32) - define @vor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1032,8 +1000,6 @@ define @vor_vi_nxv2i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i32(, , , i32) - define @vor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1098,8 +1064,6 @@ define @vor_vi_nxv4i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i32(, , , i32) - define @vor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1164,8 +1128,6 @@ define @vor_vi_nxv8i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv10i32(, , , i32) - define @vor_vv_nxv10i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv10i32: ; CHECK: # %bb.0: @@ -1230,8 +1192,6 @@ define @vor_vi_nxv10i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv16i32(, , , i32) - define @vor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1296,8 +1256,6 @@ define @vor_vi_nxv16i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv1i64(, , , i32) - define @vor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1390,8 +1348,6 @@ define @vor_vi_nxv1i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i64(, , , i32) - define @vor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1484,8 +1440,6 @@ define @vor_vi_nxv2i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i64(, , , i32) - define @vor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1578,8 +1532,6 @@ 
define @vor_vi_nxv4i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i64(, , , i32) - define @vor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vor.ll b/llvm/test/CodeGen/RISCV/rvv/vor.ll index 3b5c6ff2abe7e..f5d0f2383cce3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - 
define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i32.nxv1i32( - , - , - 
, - iXLen); - define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); 
- define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i64.nxv4i64( - , - , - , - 
iXLen); - define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: ; 
RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll index 24d8e56fa17fe..75c60ad9382b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll @@ -75,6 +75,3 @@ define @test_different_evl(* %ptr, %rev } -declare @llvm.vp.load.nxv2f32.p0nxv2f32(* nocapture, , i32) -declare @llvm.experimental.vp.reverse.nxv2f32(, , i32) -declare @llvm.experimental.vp.reverse.nxv2i1(, , i32) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll index a2466c48b0ab7..5fa29dac69601 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll @@ -77,6 +77,3 @@ define void @test_different_evl( %val, * ret void } -declare @llvm.experimental.vp.reverse.nxv2f32(, , i32) -declare @llvm.experimental.vp.reverse.nxv2i1(, , i32) -declare void @llvm.vp.store.nxv2f32.p0nxv2f32(, * nocapture, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll index b316f5f878816..8e8622b3d71d4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll @@ -236,7 +236,3 @@ define iXLen @fixed_v2i64_zero_poison(<2 x i64> %src, <2 x i1> %m, i32 %evl) { ret iXLen %r } -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i1(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i32(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i64(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.v2i64(<2 x i64>, i1, <2 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll b/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll index df003907dc360..4f4db95f058a3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare @llvm.vp.inttoptr.nxv4p0.nxv4i8(, , i32) - define @inttoptr_nxv4p0_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @inttoptr_nxv4p0_nxv4i8( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i16(, , i32) - define @inttoptr_nxv4p0_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i16: ; CHECK: # %bb.0: @@ -27,8 +23,6 
@@ define @inttoptr_nxv4p0_nxv4i16( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i32(, , i32) - define @inttoptr_nxv4p0_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i32: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @inttoptr_nxv4p0_nxv4i32( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i64(, , i32) - define @inttoptr_nxv4p0_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i64: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define @inttoptr_nxv4p0_nxv4i64( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i8.nxv4p0(, , i32) - define @ptrtoint_nxv4i8_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i8_nxv4p0: ; CHECK: # %bb.0: @@ -66,8 +56,6 @@ define @ptrtoint_nxv4i8_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i16.nxv4p0(, , i32) - define @ptrtoint_nxv4i16_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i16_nxv4p0: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define @ptrtoint_nxv4i16_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i32.nxv4p0(, , i32) - define @ptrtoint_nxv4i32_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i32_nxv4p0: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define @ptrtoint_nxv4i32_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i64.nxv4p0(, , i32) - define @ptrtoint_nxv4i64_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i64_nxv4p0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll index 09d92c3c039f9..4b8effb70586e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll @@ -158,7 +158,3 @@ define <16 x i1> @test_vp_reverse_v16i1(<16 x i1> %src, i32 zeroext %evl) { ret <16 x i1> %dst } -declare <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1>,<2 x i1>,i32) -declare <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1>,<4 x i1>,i32) -declare <8 x i1> 
@llvm.experimental.vp.reverse.v8i1(<8 x i1>,<8 x i1>,i32) -declare <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1>,<16 x i1>,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll index 8e44d76e7010f..bb15a2241faa6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll @@ -304,10 +304,3 @@ define @test_vp_reverse_nxv64i1( %src, i32 ret %dst } -declare @llvm.experimental.vp.reverse.nxv1i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv2i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv4i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv8i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv16i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv32i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv64i1(,,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll index dec68fa970c99..aa9854f7681f1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+vl-dependent-latency -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefix=VLDEP -declare <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1>, <2 x i1>, i32, <2 x i1>, i32, i32) -declare <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1>, <4 x i1>, i32, <4 x i1>, i32, i32) -declare <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1>, <8 x i1>, i32, <8 x i1>, i32, i32) -declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32, <16 x i1>, i32, i32) - define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; NOVLDEP-LABEL: test_vp_splice_v2i1: ; NOVLDEP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll index 
36f2f4e6269d8..3215b4548243c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll @@ -2,14 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefix=NOVLDEP ; RUN: llc -mtriple=riscv64 -mattr=+v,+vl-dependent-latency -verify-machineinstrs < %s | FileCheck %s --check-prefix=VLDEP -declare @llvm.experimental.vp.splice.nxv1i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv2i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv4i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv8i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv16i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv32i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv64i1(, , i32, , i32, i32) - define @test_vp_splice_nxv1i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 { ; NOVLDEP-LABEL: test_vp_splice_nxv1i1: ; NOVLDEP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll index 989fbb7fcea8b..d1f1538570011 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i16(, , i32) -declare @llvm.vp.trunc.nxv2i8.nxv2i16(, , i32) -declare @llvm.vp.trunc.nxv2i16.nxv2i32(, , i32) -declare @llvm.vp.trunc.nxv2i8.nxv2i32(, , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.lshr.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.lshr.nxv2i32(, , , i32) - define @vaaddu_1( %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vaaddu_1: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll index b6ec7906885ff..a075bba81d3c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare @llvm.vp.gather.nxv1i8.nxv1p0(, , i32) - define @vpgather_nxv1i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i8: ; RV32: # %bb.0: @@ -28,8 +26,6 @@ define @vpgather_nxv1i8( %ptrs, %v } -declare @llvm.vp.gather.nxv2i8.nxv2p0(, , i32) - define @vpgather_nxv2i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8: ; RV32: # %bb.0: @@ -174,8 +170,6 @@ define @vpgather_nxv2i8_zextload_nxv2i64( % ret %ev } -declare @llvm.vp.gather.nxv4i8.nxv4p0(, , i32) - define @vpgather_nxv4i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i8: ; RV32: # %bb.0: @@ -212,8 +206,6 @@ define @vpgather_truemask_nxv4i8( %ptrs, i32 ret %v } -declare @llvm.vp.gather.nxv8i8.nxv8p0(, , i32) - define @vpgather_nxv8i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i8: ; RV32: # %bb.0: @@ -253,8 +245,6 @@ define @vpgather_baseidx_nxv8i8(ptr %base, % ret %v } -declare @llvm.vp.gather.nxv32i8.nxv32p0(, , i32) - define @vpgather_baseidx_nxv32i8(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv32i8: ; RV32: # %bb.0: @@ -344,8 +334,6 @@ define @vpgather_baseidx_nxv32i8(ptr %base, %v } -declare @llvm.vp.gather.nxv1i16.nxv1p0(, , i32) - define @vpgather_nxv1i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i16: ; RV32: # %bb.0: @@ -364,8 +352,6 @@ define @vpgather_nxv1i16( %ptrs, %v } -declare @llvm.vp.gather.nxv2i16.nxv2p0(, , i32) - define @vpgather_nxv2i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16: ; RV32: # %bb.0: @@ -468,8 +454,6 @@ define @vpgather_nxv2i16_zextload_nxv2i64( ret %ev } -declare 
@llvm.vp.gather.nxv4i16.nxv4p0(, , i32) - define @vpgather_nxv4i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i16: ; RV32: # %bb.0: @@ -506,8 +490,6 @@ define @vpgather_truemask_nxv4i16( %ptrs, i ret %v } -declare @llvm.vp.gather.nxv8i16.nxv8p0(, , i32) - define @vpgather_nxv8i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i16: ; RV32: # %bb.0: @@ -616,8 +598,6 @@ define @vpgather_baseidx_nxv8i16(ptr %base, %v } -declare @llvm.vp.gather.nxv1i32.nxv1p0(, , i32) - define @vpgather_nxv1i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i32: ; RV32: # %bb.0: @@ -635,8 +615,6 @@ define @vpgather_nxv1i32( %ptrs, %v } -declare @llvm.vp.gather.nxv2i32.nxv2p0(, , i32) - define @vpgather_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i32: ; RV32: # %bb.0: @@ -696,8 +674,6 @@ define @vpgather_nxv2i32_zextload_nxv2i64( ret %ev } -declare @llvm.vp.gather.nxv4i32.nxv4p0(, , i32) - define @vpgather_nxv4i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i32: ; RV32: # %bb.0: @@ -732,8 +708,6 @@ define @vpgather_truemask_nxv4i32( %ptrs, i ret %v } -declare @llvm.vp.gather.nxv8i32.nxv8p0(, , i32) - define @vpgather_nxv8i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i32: ; RV32: # %bb.0: @@ -911,8 +885,6 @@ define @vpgather_baseidx_nxv8i32(ptr %base, %v } -declare @llvm.vp.gather.nxv1i64.nxv1p0(, , i32) - define @vpgather_nxv1i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i64: ; RV32: # %bb.0: @@ -930,8 +902,6 @@ define @vpgather_nxv1i64( %ptrs, %v } -declare @llvm.vp.gather.nxv2i64.nxv2p0(, , i32) - define @vpgather_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i64: ; RV32: # %bb.0: @@ -949,8 +919,6 @@ define @vpgather_nxv2i64( %ptrs, %v } -declare @llvm.vp.gather.nxv4i64.nxv4p0(, , i32) - define @vpgather_nxv4i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i64: ; RV32: # %bb.0: @@ -985,8 +953,6 @@ define @vpgather_truemask_nxv4i64( %ptrs, i 
ret %v } -declare @llvm.vp.gather.nxv8i64.nxv8p0(, , i32) - define @vpgather_nxv8i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i64: ; RV32: # %bb.0: @@ -1231,8 +1197,6 @@ define @vpgather_baseidx_nxv8i64(ptr %base, %v } -declare @llvm.vp.gather.nxv1bf16.nxv1p0(, , i32) - define @vpgather_nxv1bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1bf16: ; RV32: # %bb.0: @@ -1251,8 +1215,6 @@ define @vpgather_nxv1bf16( %ptrs, %v } -declare @llvm.vp.gather.nxv2bf16.nxv2p0(, , i32) - define @vpgather_nxv2bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2bf16: ; RV32: # %bb.0: @@ -1271,8 +1233,6 @@ define @vpgather_nxv2bf16( %ptrs, %v } -declare @llvm.vp.gather.nxv4bf16.nxv4p0(, , i32) - define @vpgather_nxv4bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4bf16: ; RV32: # %bb.0: @@ -1309,8 +1269,6 @@ define @vpgather_truemask_nxv4bf16( %ptr ret %v } -declare @llvm.vp.gather.nxv8bf16.nxv8p0(, , i32) - define @vpgather_nxv8bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8bf16: ; RV32: # %bb.0: @@ -1419,8 +1377,6 @@ define @vpgather_baseidx_nxv8bf16(ptr %base, %v } -declare @llvm.vp.gather.nxv1f16.nxv1p0(, , i32) - define @vpgather_nxv1f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f16: ; RV32: # %bb.0: @@ -1439,8 +1395,6 @@ define @vpgather_nxv1f16( %ptrs, %v } -declare @llvm.vp.gather.nxv2f16.nxv2p0(, , i32) - define @vpgather_nxv2f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f16: ; RV32: # %bb.0: @@ -1459,8 +1413,6 @@ define @vpgather_nxv2f16( %ptrs, %v } -declare @llvm.vp.gather.nxv4f16.nxv4p0(, , i32) - define @vpgather_nxv4f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f16: ; RV32: # %bb.0: @@ -1497,8 +1449,6 @@ define @vpgather_truemask_nxv4f16( %ptrs, ret %v } -declare @llvm.vp.gather.nxv8f16.nxv8p0(, , i32) - define @vpgather_nxv8f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f16: ; RV32: # %bb.0: @@ -1607,8 +1557,6 @@ define 
@vpgather_baseidx_nxv8f16(ptr %base, %v } -declare @llvm.vp.gather.nxv1f32.nxv1p0(, , i32) - define @vpgather_nxv1f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f32: ; RV32: # %bb.0: @@ -1626,8 +1574,6 @@ define @vpgather_nxv1f32( %ptrs, %v } -declare @llvm.vp.gather.nxv2f32.nxv2p0(, , i32) - define @vpgather_nxv2f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f32: ; RV32: # %bb.0: @@ -1645,8 +1591,6 @@ define @vpgather_nxv2f32( %ptrs, %v } -declare @llvm.vp.gather.nxv4f32.nxv4p0(, , i32) - define @vpgather_nxv4f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f32: ; RV32: # %bb.0: @@ -1681,8 +1625,6 @@ define @vpgather_truemask_nxv4f32( %ptrs, ret %v } -declare @llvm.vp.gather.nxv8f32.nxv8p0(, , i32) - define @vpgather_nxv8f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f32: ; RV32: # %bb.0: @@ -1860,8 +1802,6 @@ define @vpgather_baseidx_nxv8f32(ptr %base, %v } -declare @llvm.vp.gather.nxv1f64.nxv1p0(, , i32) - define @vpgather_nxv1f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f64: ; RV32: # %bb.0: @@ -1879,8 +1819,6 @@ define @vpgather_nxv1f64( %ptrs, %v } -declare @llvm.vp.gather.nxv2f64.nxv2p0(, , i32) - define @vpgather_nxv2f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f64: ; RV32: # %bb.0: @@ -1898,8 +1836,6 @@ define @vpgather_nxv2f64( %ptrs, %v } -declare @llvm.vp.gather.nxv4f64.nxv4p0(, , i32) - define @vpgather_nxv4f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f64: ; RV32: # %bb.0: @@ -1934,8 +1870,6 @@ define @vpgather_truemask_nxv4f64( %ptrs ret %v } -declare @llvm.vp.gather.nxv6f64.nxv6p0(, , i32) - define @vpgather_nxv6f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv6f64: ; RV32: # %bb.0: @@ -2180,8 +2114,6 @@ define @vpgather_baseidx_nxv6f64(ptr %base, %v } -declare @llvm.vp.gather.nxv8f64.nxv8p0(, , i32) - define @vpgather_nxv8f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f64: ; RV32: # %bb.0: @@ -2426,8 
+2358,6 @@ define @vpgather_baseidx_nxv8f64(ptr %base, %v } -declare @llvm.vp.gather.nxv16f64.nxv16p0(, , i32) - define @vpgather_nxv16f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll index 3a26af0279d50..2ece316c7e54a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.load.nxv1i8.p0(ptr, , i32) - define @vpload_nxv1i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i8: ; CHECK: # %bb.0: @@ -41,8 +39,6 @@ define @vpload_nxv1i8_passthru(ptr %ptr, %m, ret %merge } -declare @llvm.vp.load.nxv2i8.p0(ptr, , i32) - define @vpload_nxv2i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i8: ; CHECK: # %bb.0: @@ -53,8 +49,6 @@ define @vpload_nxv2i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv3i8.p0(ptr, , i32) - define @vpload_nxv3i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv3i8: ; CHECK: # %bb.0: @@ -65,8 +59,6 @@ define @vpload_nxv3i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv4i6.nxv4i6.p0(*, , i32) - define @vpload_nxv4i6(* %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i6: ; CHECK: # %bb.0: @@ -77,8 +69,6 @@ define @vpload_nxv4i6(* %ptr, %load } -declare @llvm.vp.load.nxv4i8.p0(ptr, , i32) - define @vpload_nxv4i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i8: ; CHECK: # %bb.0: @@ -89,8 +79,6 @@ define @vpload_nxv4i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv8i8.p0(ptr, , i32) - define @vpload_nxv8i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i8: ; CHECK: # %bb.0: @@ -111,8 +99,6 @@ define @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) ret %load } -declare @llvm.vp.load.nxv1i16.p0(ptr, , i32) - define 
@vpload_nxv1i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i16: ; CHECK: # %bb.0: @@ -123,8 +109,6 @@ define @vpload_nxv1i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv2i16.p0(ptr, , i32) - define @vpload_nxv2i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i16: ; CHECK: # %bb.0: @@ -145,8 +129,6 @@ define @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv4i16.p0(ptr, , i32) - define @vpload_nxv4i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i16: ; CHECK: # %bb.0: @@ -157,8 +139,6 @@ define @vpload_nxv4i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i16.p0(ptr, , i32) - define @vpload_nxv8i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i16: ; CHECK: # %bb.0: @@ -169,8 +149,6 @@ define @vpload_nxv8i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1i32.p0(ptr, , i32) - define @vpload_nxv1i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i32: ; CHECK: # %bb.0: @@ -181,8 +159,6 @@ define @vpload_nxv1i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv2i32.p0(ptr, , i32) - define @vpload_nxv2i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i32: ; CHECK: # %bb.0: @@ -193,8 +169,6 @@ define @vpload_nxv2i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv4i32.p0(ptr, , i32) - define @vpload_nxv4i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i32: ; CHECK: # %bb.0: @@ -215,8 +189,6 @@ define @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv8i32.p0(ptr, , i32) - define @vpload_nxv8i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i32: ; CHECK: # %bb.0: @@ -227,8 +199,6 @@ define @vpload_nxv8i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1i64.p0(ptr, , i32) - define @vpload_nxv1i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i64: ; CHECK: # %bb.0: @@ -249,8 +219,6 @@ 
define @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv2i64.p0(ptr, , i32) - define @vpload_nxv2i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i64: ; CHECK: # %bb.0: @@ -261,8 +229,6 @@ define @vpload_nxv2i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv4i64.p0(ptr, , i32) - define @vpload_nxv4i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i64: ; CHECK: # %bb.0: @@ -273,8 +239,6 @@ define @vpload_nxv4i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i64.p0(ptr, , i32) - define @vpload_nxv8i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i64: ; CHECK: # %bb.0: @@ -285,8 +249,6 @@ define @vpload_nxv8i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1bf16.p0(ptr, , i32) - define @vpload_nxv1bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1bf16: ; CHECK: # %bb.0: @@ -297,8 +259,6 @@ define @vpload_nxv1bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv2bf16.p0(ptr, , i32) - define @vpload_nxv2bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2bf16: ; CHECK: # %bb.0: @@ -319,8 +279,6 @@ define @vpload_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext ret %load } -declare @llvm.vp.load.nxv4bf16.p0(ptr, , i32) - define @vpload_nxv4bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4bf16: ; CHECK: # %bb.0: @@ -331,8 +289,6 @@ define @vpload_nxv4bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv8bf16.p0(ptr, , i32) - define @vpload_nxv8bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8bf16: ; CHECK: # %bb.0: @@ -343,8 +299,6 @@ define @vpload_nxv8bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv1f16.p0(ptr, , i32) - define @vpload_nxv1f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f16: ; CHECK: # %bb.0: @@ -355,8 +309,6 @@ define @vpload_nxv1f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv2f16.p0(ptr, , i32) - define 
@vpload_nxv2f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f16: ; CHECK: # %bb.0: @@ -377,8 +329,6 @@ define @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %e ret %load } -declare @llvm.vp.load.nxv4f16.p0(ptr, , i32) - define @vpload_nxv4f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f16: ; CHECK: # %bb.0: @@ -389,8 +339,6 @@ define @vpload_nxv4f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv8f16.p0(ptr, , i32) - define @vpload_nxv8f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f16: ; CHECK: # %bb.0: @@ -401,8 +349,6 @@ define @vpload_nxv8f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv1f32.p0(ptr, , i32) - define @vpload_nxv1f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f32: ; CHECK: # %bb.0: @@ -413,8 +359,6 @@ define @vpload_nxv1f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv2f32.p0(ptr, , i32) - define @vpload_nxv2f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f32: ; CHECK: # %bb.0: @@ -425,8 +369,6 @@ define @vpload_nxv2f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv4f32.p0(ptr, , i32) - define @vpload_nxv4f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f32: ; CHECK: # %bb.0: @@ -437,8 +379,6 @@ define @vpload_nxv4f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv8f32.p0(ptr, , i32) - define @vpload_nxv8f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f32: ; CHECK: # %bb.0: @@ -459,8 +399,6 @@ define @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext % ret %load } -declare @llvm.vp.load.nxv1f64.p0(ptr, , i32) - define @vpload_nxv1f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f64: ; CHECK: # %bb.0: @@ -471,8 +409,6 @@ define @vpload_nxv1f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv2f64.p0(ptr, , i32) - define @vpload_nxv2f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f64: ; CHECK: # %bb.0: @@ -483,8 +419,6 @@ define 
@vpload_nxv2f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv4f64.p0(ptr, , i32) - define @vpload_nxv4f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f64: ; CHECK: # %bb.0: @@ -505,8 +439,6 @@ define @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext ret %load } -declare @llvm.vp.load.nxv8f64.p0(ptr, , i32) - define @vpload_nxv8f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f64: ; CHECK: # %bb.0: @@ -517,8 +449,6 @@ define @vpload_nxv8f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv16f64.p0(ptr, , i32) - define @vpload_nxv16f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv16f64: ; CHECK: # %bb.0: @@ -547,11 +477,6 @@ define @vpload_nxv16f64(ptr %ptr, %m, ret %load } -declare @llvm.vp.load.nxv17f64.p0(ptr, , i32) - -declare @llvm.vector.extract.nxv1f64( %vec, i64 %idx) -declare @llvm.vector.extract.nxv16f64( %vec, i64 %idx) - ; Note: We can't return as that introduces a vector ; store can't yet be legalized through widening. In order to test purely the ; vp.load legalization, manually split it. 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll index 16201da1a509a..8a1a3cdd90d72 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zvfh,+m,+zfbfmin,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.merge.nxv1bf16(, , , i32) - define @vpmerge_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define @vpmerge_vf_nxv1bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv2bf16(, , , i32) - define @vpmerge_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vpmerge_vf_nxv2bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv4bf16(, , , i32) - define @vpmerge_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -92,8 +86,6 @@ define @vpmerge_vf_nxv4bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv8bf16(, , , i32) - define @vpmerge_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -120,8 +112,6 @@ define @vpmerge_vf_nxv8bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv16bf16(, , , i32) - define @vpmerge_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -148,8 +138,6 @@ define @vpmerge_vf_nxv16bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv32bf16(, , , i32) - define @vpmerge_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll index 6e0aee18c6c74..03697aafea45d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -7,7 +7,6 @@ ; 
RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFHMIN -declare @llvm.vp.merge.nxv1i1(, , , i32) define @vpmerge_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_nxv1i1: @@ -232,8 +231,6 @@ define @vpmerge_nxv128i1( %va, %v } -declare @llvm.vp.merge.nxv1i8(, , , i32) - define @vpmerge_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i8: ; CHECK: # %bb.0: @@ -267,8 +264,6 @@ define @vpmerge_vi_nxv1i8( %vb, %v } -declare @llvm.vp.merge.nxv2i8(, , , i32) - define @vpmerge_vv_nxv2i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i8: ; CHECK: # %bb.0: @@ -302,8 +297,6 @@ define @vpmerge_vi_nxv2i8( %vb, %v } -declare @llvm.vp.merge.nxv3i8(, , , i32) - define @vpmerge_vv_nxv3i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv3i8: ; CHECK: # %bb.0: @@ -337,8 +330,6 @@ define @vpmerge_vi_nxv3i8( %vb, %v } -declare @llvm.vp.merge.nxv4i8(, , , i32) - define @vpmerge_vv_nxv4i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i8: ; CHECK: # %bb.0: @@ -372,8 +363,6 @@ define @vpmerge_vi_nxv4i8( %vb, %v } -declare @llvm.vp.merge.nxv8i7(, , , i32) - define @vpmerge_vv_nxv8i7( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i7: ; CHECK: # %bb.0: @@ -407,8 +396,6 @@ define @vpmerge_vi_nxv8i7( %vb, %v } -declare @llvm.vp.merge.nxv8i8(, , , i32) - define @vpmerge_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i8: ; CHECK: # %bb.0: @@ -442,8 +429,6 @@ define @vpmerge_vi_nxv8i8( %vb, %v } -declare @llvm.vp.merge.nxv16i8(, , , i32) - define @vpmerge_vv_nxv16i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i8: ; CHECK: # %bb.0: @@ -477,8 +462,6 @@ define @vpmerge_vi_nxv16i8( %vb, %v } -declare @llvm.vp.merge.nxv32i8(, , , i32) - define 
@vpmerge_vv_nxv32i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i8: ; CHECK: # %bb.0: @@ -512,8 +495,6 @@ define @vpmerge_vi_nxv32i8( %vb, %v } -declare @llvm.vp.merge.nxv64i8(, , , i32) - define @vpmerge_vv_nxv64i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv64i8: ; CHECK: # %bb.0: @@ -547,8 +528,6 @@ define @vpmerge_vi_nxv64i8( %vb, %v } -declare @llvm.vp.merge.nxv128i8(, , , i32) - define @vpmerge_vv_nxv128i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv128i8: ; CHECK: # %bb.0: @@ -649,8 +628,6 @@ define @vpmerge_vi_nxv128i8( %vb, %v } -declare @llvm.vp.merge.nxv1i16(, , , i32) - define @vpmerge_vv_nxv1i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i16: ; CHECK: # %bb.0: @@ -684,8 +661,6 @@ define @vpmerge_vi_nxv1i16( %vb, %v } -declare @llvm.vp.merge.nxv2i16(, , , i32) - define @vpmerge_vv_nxv2i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i16: ; CHECK: # %bb.0: @@ -719,8 +694,6 @@ define @vpmerge_vi_nxv2i16( %vb, %v } -declare @llvm.vp.merge.nxv4i16(, , , i32) - define @vpmerge_vv_nxv4i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i16: ; CHECK: # %bb.0: @@ -754,8 +727,6 @@ define @vpmerge_vi_nxv4i16( %vb, %v } -declare @llvm.vp.merge.nxv8i16(, , , i32) - define @vpmerge_vv_nxv8i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i16: ; CHECK: # %bb.0: @@ -789,8 +760,6 @@ define @vpmerge_vi_nxv8i16( %vb, %v } -declare @llvm.vp.merge.nxv16i16(, , , i32) - define @vpmerge_vv_nxv16i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i16: ; CHECK: # %bb.0: @@ -824,8 +793,6 @@ define @vpmerge_vi_nxv16i16( %vb, %v } -declare @llvm.vp.merge.nxv32i16(, , , i32) - define @vpmerge_vv_nxv32i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i16: ; CHECK: # %bb.0: @@ -859,8 +826,6 @@ define @vpmerge_vi_nxv32i16( %vb, %v } -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vpmerge_vv_nxv1i32( %va, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i32: ; CHECK: # %bb.0: @@ -894,8 +859,6 @@ define @vpmerge_vi_nxv1i32( %vb, %v } -declare @llvm.vp.merge.nxv2i32(, , , i32) - define @vpmerge_vv_nxv2i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i32: ; CHECK: # %bb.0: @@ -929,8 +892,6 @@ define @vpmerge_vi_nxv2i32( %vb, %v } -declare @llvm.vp.merge.nxv4i32(, , , i32) - define @vpmerge_vv_nxv4i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i32: ; CHECK: # %bb.0: @@ -964,8 +925,6 @@ define @vpmerge_vi_nxv4i32( %vb, %v } -declare @llvm.vp.merge.nxv8i32(, , , i32) - define @vpmerge_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i32: ; CHECK: # %bb.0: @@ -999,8 +958,6 @@ define @vpmerge_vi_nxv8i32( %vb, %v } -declare @llvm.vp.merge.nxv16i32(, , , i32) - define @vpmerge_vv_nxv16i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1034,8 +991,6 @@ define @vpmerge_vi_nxv16i32( %vb, %v } -declare @llvm.vp.merge.nxv1i64(, , , i32) - define @vpmerge_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1082,8 +1037,6 @@ define @vpmerge_vi_nxv1i64( %vb, %v } -declare @llvm.vp.merge.nxv2i64(, , , i32) - define @vpmerge_vv_nxv2i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1130,8 +1083,6 @@ define @vpmerge_vi_nxv2i64( %vb, %v } -declare @llvm.vp.merge.nxv4i64(, , , i32) - define @vpmerge_vv_nxv4i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1178,8 +1129,6 @@ define @vpmerge_vi_nxv4i64( %vb, %v } -declare @llvm.vp.merge.nxv8i64(, , , i32) - define @vpmerge_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i64: ; CHECK: # %bb.0: @@ -1226,8 +1175,6 @@ define @vpmerge_vi_nxv8i64( %vb, %v } -declare @llvm.vp.merge.nxv1f16(, , , i32) - define @vpmerge_vv_nxv1f16( %va, %vb, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f16: ; CHECK: # %bb.0: @@ -1275,8 +1222,6 @@ define @vpmerge_vf_nxv1f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv2f16(, , , i32) - define @vpmerge_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1324,8 +1269,6 @@ define @vpmerge_vf_nxv2f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv4f16(, , , i32) - define @vpmerge_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1373,8 +1316,6 @@ define @vpmerge_vf_nxv4f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv8f16(, , , i32) - define @vpmerge_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f16: ; CHECK: # %bb.0: @@ -1422,8 +1363,6 @@ define @vpmerge_vf_nxv8f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv16f16(, , , i32) - define @vpmerge_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f16: ; CHECK: # %bb.0: @@ -1471,8 +1410,6 @@ define @vpmerge_vf_nxv16f16(half %a, % ret %v } -declare @llvm.vp.merge.nxv32f16(, , , i32) - define @vpmerge_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32f16: ; CHECK: # %bb.0: @@ -1520,8 +1457,6 @@ define @vpmerge_vf_nxv32f16(half %a, % ret %v } -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vpmerge_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1545,8 +1480,6 @@ define @vpmerge_vf_nxv1f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv2f32(, , , i32) - define @vpmerge_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1570,8 +1503,6 @@ define @vpmerge_vf_nxv2f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv4f32(, , , i32) - define @vpmerge_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1595,8 +1526,6 @@ define @vpmerge_vf_nxv4f32(float %a, % ret %v } -declare 
@llvm.vp.merge.nxv8f32(, , , i32) - define @vpmerge_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1620,8 +1549,6 @@ define @vpmerge_vf_nxv8f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv16f32(, , , i32) - define @vpmerge_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1645,8 +1572,6 @@ define @vpmerge_vf_nxv16f32(float %a, %v } -declare @llvm.vp.merge.nxv1f64(, , , i32) - define @vpmerge_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1670,8 +1595,6 @@ define @vpmerge_vf_nxv1f64(double %a, %v } -declare @llvm.vp.merge.nxv2f64(, , , i32) - define @vpmerge_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1695,8 +1618,6 @@ define @vpmerge_vf_nxv2f64(double %a, %v } -declare @llvm.vp.merge.nxv4f64(, , , i32) - define @vpmerge_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1720,8 +1641,6 @@ define @vpmerge_vf_nxv4f64(double %a, %v } -declare @llvm.vp.merge.nxv8f64(, , , i32) - define @vpmerge_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll index cf8c06fb91089..7e4a60095d7cc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v,+m \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.vp.scatter.nxv1i8.nxv1p0(, , , i32) - define void @vpscatter_nxv1i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i8: ; RV32: # %bb.0: @@ -26,8 +24,6 @@ define void @vpscatter_nxv1i8( %val, %ptrs, ret void } -declare void @llvm.vp.scatter.nxv2i8.nxv2p0(, , , 
i32) - define void @vpscatter_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i8: ; RV32: # %bb.0: @@ -113,8 +109,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8( %val, , , , i32) - define void @vpscatter_nxv4i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i8: ; RV32: # %bb.0: @@ -147,8 +141,6 @@ define void @vpscatter_truemask_nxv4i8( %val, , , , i32) - define void @vpscatter_nxv8i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i8: ; RV32: # %bb.0: @@ -186,8 +178,6 @@ define void @vpscatter_baseidx_nxv8i8( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i16: ; RV32: # %bb.0: @@ -204,8 +194,6 @@ define void @vpscatter_nxv1i16( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i16.nxv2p0(, , , i32) - define void @vpscatter_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i16: ; RV32: # %bb.0: @@ -264,8 +252,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16( %val, , , , i32) - define void @vpscatter_nxv4i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i16: ; RV32: # %bb.0: @@ -298,8 +284,6 @@ define void @vpscatter_truemask_nxv4i16( %val, , , , i32) - define void @vpscatter_nxv8i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i16: ; RV32: # %bb.0: @@ -406,7 +390,6 @@ define void @vpscatter_baseidx_nxv8i16( %val, ptr %base, @llvm.vp.sext.nxv8i16.nxv8i32(, , i32) define void @vpscatter_baseidx_vpsext_nxv8i16_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpsext_nxv8i16_nxv8i16: ; RV32: # %bb.0: @@ -429,7 +412,6 @@ define void @vpscatter_baseidx_vpsext_nxv8i16_nxv8i16( %val, p ret void } -declare @llvm.vp.zext.nxv8i16.nxv8i32(, , i32) define void @vpscatter_baseidx_vpzext_nxv8i16_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpzext_nxv8i16_nxv8i16: ; RV32: # 
%bb.0: @@ -452,7 +434,6 @@ define void @vpscatter_baseidx_vpzext_nxv8i16_nxv8i16( %val, p ret void } -declare @llvm.vp.sext.nxv8i32.nxv8i64(, , i32) define void @vpscatter_baseidx_vpsext_nxv8i32_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpsext_nxv8i32_nxv8i16: ; RV32: # %bb.0: @@ -478,7 +459,6 @@ define void @vpscatter_baseidx_vpsext_nxv8i32_nxv8i16( %val, p ret void } -declare @llvm.vp.zext.nxv8i32.nxv8i64(, , i32) define void @vpscatter_baseidx_vpzext_nxv8i32_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpzext_nxv8i32_nxv8i16: ; RV32: # %bb.0: @@ -504,8 +484,6 @@ define void @vpscatter_baseidx_vpzext_nxv8i32_nxv8i16( %val, p ret void } -declare void @llvm.vp.scatter.nxv1i32.nxv1p0(, , , i32) - define void @vpscatter_nxv1i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i32: ; RV32: # %bb.0: @@ -522,8 +500,6 @@ define void @vpscatter_nxv1i32( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i32.nxv2p0(, , , i32) - define void @vpscatter_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i32: ; RV32: # %bb.0: @@ -559,8 +535,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32( %val, , , , i32) - define void @vpscatter_nxv4i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i32: ; RV32: # %bb.0: @@ -593,8 +567,6 @@ define void @vpscatter_truemask_nxv4i32( %val, , , , i32) - define void @vpscatter_nxv8i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i32: ; RV32: # %bb.0: @@ -771,8 +743,6 @@ define void @vpscatter_baseidx_nxv8i32( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i64: ; RV32: # %bb.0: @@ -789,8 +759,6 @@ define void @vpscatter_nxv1i64( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i64.nxv2p0(, , , i32) - define void @vpscatter_nxv2i64( %val, %ptrs, %m, i32 zeroext %evl) { ; 
RV32-LABEL: vpscatter_nxv2i64: ; RV32: # %bb.0: @@ -807,8 +775,6 @@ define void @vpscatter_nxv2i64( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv4i64.nxv4p0(, , , i32) - define void @vpscatter_nxv4i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i64: ; RV32: # %bb.0: @@ -841,8 +807,6 @@ define void @vpscatter_truemask_nxv4i64( %val, , , , i32) - define void @vpscatter_nxv8i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i64: ; RV32: # %bb.0: @@ -1086,8 +1050,6 @@ define void @vpscatter_baseidx_nxv8i64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1bf16: ; RV32: # %bb.0: @@ -1104,8 +1066,6 @@ define void @vpscatter_nxv1bf16( %val, % ret void } -declare void @llvm.vp.scatter.nxv2bf16.nxv2p0(, , , i32) - define void @vpscatter_nxv2bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2bf16: ; RV32: # %bb.0: @@ -1122,8 +1082,6 @@ define void @vpscatter_nxv2bf16( %val, % ret void } -declare void @llvm.vp.scatter.nxv4bf16.nxv4p0(, , , i32) - define void @vpscatter_nxv4bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4bf16: ; RV32: # %bb.0: @@ -1156,8 +1114,6 @@ define void @vpscatter_truemask_nxv4bf16( %val, , , , i32) - define void @vpscatter_nxv8bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8bf16: ; RV32: # %bb.0: @@ -1264,8 +1220,6 @@ define void @vpscatter_baseidx_nxv8bf16( %val, ptr %base, < ret void } -declare void @llvm.vp.scatter.nxv1f16.nxv1p0(, , , i32) - define void @vpscatter_nxv1f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f16: ; RV32: # %bb.0: @@ -1282,8 +1236,6 @@ define void @vpscatter_nxv1f16( %val, %ptr ret void } -declare void @llvm.vp.scatter.nxv2f16.nxv2p0(, , , i32) - define void @vpscatter_nxv2f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f16: ; RV32: # %bb.0: @@ -1300,8 +1252,6 @@ define void 
@vpscatter_nxv2f16( %val, %ptr ret void } -declare void @llvm.vp.scatter.nxv4f16.nxv4p0(, , , i32) - define void @vpscatter_nxv4f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f16: ; RV32: # %bb.0: @@ -1334,8 +1284,6 @@ define void @vpscatter_truemask_nxv4f16( %val, , , , i32) - define void @vpscatter_nxv8f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f16: ; RV32: # %bb.0: @@ -1442,8 +1390,6 @@ define void @vpscatter_baseidx_nxv8f16( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f32: ; RV32: # %bb.0: @@ -1460,8 +1406,6 @@ define void @vpscatter_nxv1f32( %val, %pt ret void } -declare void @llvm.vp.scatter.nxv2f32.nxv2p0(, , , i32) - define void @vpscatter_nxv2f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f32: ; RV32: # %bb.0: @@ -1478,8 +1422,6 @@ define void @vpscatter_nxv2f32( %val, %pt ret void } -declare void @llvm.vp.scatter.nxv4f32.nxv4p0(, , , i32) - define void @vpscatter_nxv4f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f32: ; RV32: # %bb.0: @@ -1512,8 +1454,6 @@ define void @vpscatter_truemask_nxv4f32( %val, , , , i32) - define void @vpscatter_nxv8f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f32: ; RV32: # %bb.0: @@ -1690,8 +1630,6 @@ define void @vpscatter_baseidx_nxv8f32( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f64: ; RV32: # %bb.0: @@ -1708,8 +1646,6 @@ define void @vpscatter_nxv1f64( %val, %p ret void } -declare void @llvm.vp.scatter.nxv2f64.nxv2p0(, , , i32) - define void @vpscatter_nxv2f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f64: ; RV32: # %bb.0: @@ -1726,8 +1662,6 @@ define void @vpscatter_nxv2f64( %val, %p ret void } -declare void @llvm.vp.scatter.nxv4f64.nxv4p0(, , , i32) - define void @vpscatter_nxv4f64( %val, %ptrs, %m, i32 zeroext 
%evl) { ; RV32-LABEL: vpscatter_nxv4f64: ; RV32: # %bb.0: @@ -1760,8 +1694,6 @@ define void @vpscatter_truemask_nxv4f64( %val, , , , i32) - define void @vpscatter_nxv6f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv6f64: ; RV32: # %bb.0: @@ -2005,8 +1937,6 @@ define void @vpscatter_baseidx_nxv6f64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv8f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f64: ; RV32: # %bb.0: @@ -2250,8 +2180,6 @@ define void @vpscatter_baseidx_nxv8f64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv16f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll index 982ec218e4688..9fd8b9d23cb5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.vp.store.nxv1i8.p0(, ptr, , i32) - define void @vpstore_nxv1i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i8: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define void @vpstore_nxv1i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv2i8.p0(, ptr, , i32) - define void @vpstore_nxv2i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i8: ; CHECK: # %bb.0: @@ -32,8 +28,6 @@ define void @vpstore_nxv2i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv3i8.p0(, ptr, , i32) - define void @vpstore_nxv3i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv3i8: ; CHECK: # %bb.0: @@ -44,8 +38,6 @@ define void @vpstore_nxv3i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv4i8.p0(, ptr, , i32) - define void @vpstore_nxv4i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i8: ; CHECK: # %bb.0: @@ -56,8 +48,6 @@ define void @vpstore_nxv4i8( 
%val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv8i8.p0(, ptr, , i32) - define void @vpstore_nxv8i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i8: ; CHECK: # %bb.0: @@ -68,8 +58,6 @@ define void @vpstore_nxv8i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv1i16.p0(, ptr, , i32) - define void @vpstore_nxv1i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i16: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define void @vpstore_nxv1i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i16: ; CHECK: # %bb.0: @@ -92,8 +78,6 @@ define void @vpstore_nxv2i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i16: ; CHECK: # %bb.0: @@ -104,8 +88,6 @@ define void @vpstore_nxv4i16( %val, ptr %ptr, , *, , i32) - define void @vpstore_nxv8i12( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i12: ; CHECK: # %bb.0: @@ -116,8 +98,6 @@ define void @vpstore_nxv8i12( %val, * %ptr, ret void } -declare void @llvm.vp.store.nxv8i16.p0(, ptr, , i32) - define void @vpstore_nxv8i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i16: ; CHECK: # %bb.0: @@ -128,8 +108,6 @@ define void @vpstore_nxv8i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i32: ; CHECK: # %bb.0: @@ -140,8 +118,6 @@ define void @vpstore_nxv1i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i32: ; CHECK: # %bb.0: @@ -152,8 +128,6 @@ define void @vpstore_nxv2i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i32: ; CHECK: # %bb.0: @@ -164,8 +138,6 @@ define void @vpstore_nxv4i32( %val, ptr %ptr, , ptr, , i32) - define void 
@vpstore_nxv8i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i32: ; CHECK: # %bb.0: @@ -176,8 +148,6 @@ define void @vpstore_nxv8i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i64: ; CHECK: # %bb.0: @@ -188,8 +158,6 @@ define void @vpstore_nxv1i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i64: ; CHECK: # %bb.0: @@ -200,8 +168,6 @@ define void @vpstore_nxv2i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i64: ; CHECK: # %bb.0: @@ -212,8 +178,6 @@ define void @vpstore_nxv4i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i64: ; CHECK: # %bb.0: @@ -224,8 +188,6 @@ define void @vpstore_nxv8i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1bf16: ; CHECK: # %bb.0: @@ -236,8 +198,6 @@ define void @vpstore_nxv1bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2bf16: ; CHECK: # %bb.0: @@ -248,8 +208,6 @@ define void @vpstore_nxv2bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4bf16: ; CHECK: # %bb.0: @@ -260,8 +218,6 @@ define void @vpstore_nxv4bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8bf16: ; CHECK: # %bb.0: @@ -272,8 +228,6 @@ define void @vpstore_nxv8bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f16: ; CHECK: # %bb.0: @@ -284,8 +238,6 @@ define void @vpstore_nxv1f16( %val, ptr 
%ptr, , ptr, , i32) - define void @vpstore_nxv2f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f16: ; CHECK: # %bb.0: @@ -296,8 +248,6 @@ define void @vpstore_nxv2f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f16: ; CHECK: # %bb.0: @@ -308,8 +258,6 @@ define void @vpstore_nxv4f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f16: ; CHECK: # %bb.0: @@ -320,8 +268,6 @@ define void @vpstore_nxv8f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f32: ; CHECK: # %bb.0: @@ -332,8 +278,6 @@ define void @vpstore_nxv1f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f32: ; CHECK: # %bb.0: @@ -344,8 +288,6 @@ define void @vpstore_nxv2f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f32: ; CHECK: # %bb.0: @@ -356,8 +298,6 @@ define void @vpstore_nxv4f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f32: ; CHECK: # %bb.0: @@ -368,8 +308,6 @@ define void @vpstore_nxv8f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f64: ; CHECK: # %bb.0: @@ -380,8 +318,6 @@ define void @vpstore_nxv1f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f64: ; CHECK: # %bb.0: @@ -392,8 +328,6 @@ define void @vpstore_nxv2f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f64: ; CHECK: # %bb.0: @@ -404,8 +338,6 @@ define void 
@vpstore_nxv4f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f64: ; CHECK: # %bb.0: @@ -426,8 +358,6 @@ define void @vpstore_nxv1i8_allones_mask( %val, ptr %ptr, i32 z ret void } -declare void @llvm.vp.store.nxv16f64.p0(, ptr, , i32) - define void @vpstore_nxv16f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv16f64: ; CHECK: # %bb.0: @@ -455,8 +385,6 @@ define void @vpstore_nxv16f64( %val, ptr %ptr, , ptr, , i32) - ; Widen to nxv32f64 then split into 4 x nxv8f64, of which 1 is empty. define void @vpstore_nxv17f64( %val, ptr %ptr, %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll index c04b1925b749a..6014c8aceb599 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdot.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdot.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdot_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdot_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv2i32_i32( %0, %1, i32 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll index 904ff293d3847..f54ec9b4308fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdotsu.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdotsu_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } 
-declare @llvm.riscv.vqdotsu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotsu_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll index 2e6528da43b35..1c9f42ead5f70 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK 
-declare @llvm.riscv.vqdotu.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdotu_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotu_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll index 94413369dd995..e4df6e146eef2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdotus.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); - define @intrinsic_vqdotus_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv2i32_i32: @@ -48,12 +35,6 @@ entry: ret %a } 
-declare @llvm.riscv.vqdotus.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv4i32_i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv8i32_i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv16i32_i32: @@ -114,13 +83,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv1i32_i32: @@ -138,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv2i32_i32: @@ -162,13 +117,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv4i32_i32: @@ -186,13 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv8i32_i32: @@ -210,13 +151,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotus_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand.ll b/llvm/test/CodeGen/RISCV/rvv/vredand.ll index e33a821e38487..b10cf0ad763c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredand.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredand.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax.ll index 52ace9b687b80..faa14eba96513 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmax.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmax.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll index 1a56a66aaa306..dbafe1a9172dd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmaxu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define 
@intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv16i8( - , - , - , - 
iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin.ll index 26c7ea86c1e84..b177122a63caf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmin.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmin.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu.ll index 24c16176ecce4..7b3a7a01bcc37 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredminu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredminu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define 
@intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv32i8( - , 
- , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret 
%a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv8i32( - , - , - , - iXLen); - define 
@intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor.ll b/llvm/test/CodeGen/RISCV/rvv/vredor.ll index c25e4de414c4f..7b4b900c2645f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredor.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 
@@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum.ll index 3fb2ea3a48095..ce452ed4cf5dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredsum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredsum.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredsum.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll index 274ac18deb273..70150d59e729c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vector.reduce.fadd.nxv1f16(half, ) - define half @vreduce_fadd_nxv1f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define half @vreduce_ord_fadd_nxv1f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv2f16(half, ) - define half @vreduce_fadd_nxv2f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define half @vreduce_ord_fadd_nxv2f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv4f16(half, ) - define half @vreduce_fadd_nxv4f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define half @vreduce_ord_fadd_nxv4f16( %v, half %s) { ret half %red } -declare float @llvm.vector.reduce.fadd.nxv1f32(float, ) - define float @vreduce_fadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f32: ; CHECK: # 
%bb.0: @@ -138,8 +130,6 @@ define float @vreduce_ord_fwadd_nxv1f32( %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.nxv2f32(float, ) - define float @vreduce_fadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f32: ; CHECK: # %bb.0: @@ -194,8 +184,6 @@ define float @vreduce_ord_fwadd_nxv2f32( %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @vreduce_fadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f32: ; CHECK: # %bb.0: @@ -250,8 +238,6 @@ define float @vreduce_ord_fwadd_nxv4f32( %v, float %s) { ret float %red } -declare double @llvm.vector.reduce.fadd.nxv1f64(double, ) - define double @vreduce_fadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f64: ; CHECK: # %bb.0: @@ -306,8 +292,6 @@ define double @vreduce_ord_fwadd_nxv1f64( %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.nxv2f64(double, ) - define double @vreduce_fadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f64: ; CHECK: # %bb.0: @@ -362,8 +346,6 @@ define double @vreduce_ord_fwadd_nxv2f64( %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.nxv4f64(double, ) - define double @vreduce_fadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f64: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define double @vreduce_ord_fwadd_nxv4f64( %v, double %s) { ret double %red } -declare half @llvm.vector.reduce.fmin.nxv1f16() - define half @vreduce_fmin_nxv1f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f16: ; CHECK: # %bb.0: @@ -453,8 +433,6 @@ define half @vreduce_fmin_nxv1f16_nonans_noinfs( %v) #1 { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv2f16() - define half @vreduce_fmin_nxv2f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f16: ; CHECK: # %bb.0: @@ -466,8 +444,6 @@ define half @vreduce_fmin_nxv2f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv4f16() - define half @vreduce_fmin_nxv4f16( %v) { ; CHECK-LABEL: 
vreduce_fmin_nxv4f16: ; CHECK: # %bb.0: @@ -479,8 +455,6 @@ define half @vreduce_fmin_nxv4f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv64f16() - define half @vreduce_fmin_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv64f16: ; CHECK: # %bb.0: @@ -493,8 +467,6 @@ define half @vreduce_fmin_nxv64f16( %v) { ret half %red } -declare float @llvm.vector.reduce.fmin.nxv1f32() - define float @vreduce_fmin_nxv1f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f32: ; CHECK: # %bb.0: @@ -528,8 +500,6 @@ define float @vreduce_fmin_nxv1f32_nonans_noinfs( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv2f32() - define float @vreduce_fmin_nxv2f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f32: ; CHECK: # %bb.0: @@ -541,8 +511,6 @@ define float @vreduce_fmin_nxv2f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv4f32() - define float @vreduce_fmin_nxv4f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv4f32: ; CHECK: # %bb.0: @@ -554,8 +522,6 @@ define float @vreduce_fmin_nxv4f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv32f32() - define float @vreduce_fmin_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv32f32: ; CHECK: # %bb.0: @@ -568,8 +534,6 @@ define float @vreduce_fmin_nxv32f32( %v) { ret float %red } -declare double @llvm.vector.reduce.fmin.nxv1f64() - define double @vreduce_fmin_nxv1f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f64: ; CHECK: # %bb.0: @@ -603,8 +567,6 @@ define double @vreduce_fmin_nxv1f64_nonans_noinfs( %v) { ret double %red } -declare double @llvm.vector.reduce.fmin.nxv2f64() - define double @vreduce_fmin_nxv2f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f64: ; CHECK: # %bb.0: @@ -616,8 +578,6 @@ define double @vreduce_fmin_nxv2f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmin.nxv4f64() - define double @vreduce_fmin_nxv4f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv4f64: ; CHECK: # %bb.0: @@ -629,8 +589,6 @@ define double @vreduce_fmin_nxv4f64( %v) { ret double %red } -declare 
double @llvm.vector.reduce.fmin.nxv16f64() - define double @vreduce_fmin_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv16f64: ; CHECK: # %bb.0: @@ -643,8 +601,6 @@ define double @vreduce_fmin_nxv16f64( %v) { ret double %red } -declare half @llvm.vector.reduce.fmax.nxv1f16() - define half @vreduce_fmax_nxv1f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f16: ; CHECK: # %bb.0: @@ -678,8 +634,6 @@ define half @vreduce_fmax_nxv1f16_nonans_noinfs( %v) #1 { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv2f16() - define half @vreduce_fmax_nxv2f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f16: ; CHECK: # %bb.0: @@ -691,8 +645,6 @@ define half @vreduce_fmax_nxv2f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv4f16() - define half @vreduce_fmax_nxv4f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f16: ; CHECK: # %bb.0: @@ -704,8 +656,6 @@ define half @vreduce_fmax_nxv4f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv64f16() - define half @vreduce_fmax_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv64f16: ; CHECK: # %bb.0: @@ -718,8 +668,6 @@ define half @vreduce_fmax_nxv64f16( %v) { ret half %red } -declare float @llvm.vector.reduce.fmax.nxv1f32() - define float @vreduce_fmax_nxv1f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f32: ; CHECK: # %bb.0: @@ -753,8 +701,6 @@ define float @vreduce_fmax_nxv1f32_nonans_noinfs( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv2f32() - define float @vreduce_fmax_nxv2f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f32: ; CHECK: # %bb.0: @@ -766,8 +712,6 @@ define float @vreduce_fmax_nxv2f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv4f32() - define float @vreduce_fmax_nxv4f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f32: ; CHECK: # %bb.0: @@ -779,8 +723,6 @@ define float @vreduce_fmax_nxv4f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv32f32() - define float @vreduce_fmax_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv32f32: ; CHECK: # %bb.0: @@ 
-793,8 +735,6 @@ define float @vreduce_fmax_nxv32f32( %v) { ret float %red } -declare double @llvm.vector.reduce.fmax.nxv1f64() - define double @vreduce_fmax_nxv1f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f64: ; CHECK: # %bb.0: @@ -828,8 +768,6 @@ define double @vreduce_fmax_nxv1f64_nonans_noinfs( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv2f64() - define double @vreduce_fmax_nxv2f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f64: ; CHECK: # %bb.0: @@ -841,8 +779,6 @@ define double @vreduce_fmax_nxv2f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv4f64() - define double @vreduce_fmax_nxv4f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f64: ; CHECK: # %bb.0: @@ -854,8 +790,6 @@ define double @vreduce_fmax_nxv4f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv16f64() - define double @vreduce_fmax_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv16f64: ; CHECK: # %bb.0: @@ -881,7 +815,6 @@ define float @vreduce_nsz_fadd_nxv1f32( %v, float %s) { } ; Test Widen VECREDUCE_SEQ_FADD -declare half @llvm.vector.reduce.fadd.nxv3f16(half, ) define half @vreduce_ord_fadd_nxv3f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv3f16: @@ -900,8 +833,6 @@ define half @vreduce_ord_fadd_nxv3f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv6f16(half, ) - define half @vreduce_ord_fadd_nxv6f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv6f16: ; CHECK: # %bb.0: @@ -918,8 +849,6 @@ define half @vreduce_ord_fadd_nxv6f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv10f16(half, ) - define half @vreduce_ord_fadd_nxv10f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv10f16: ; CHECK: # %bb.0: @@ -936,8 +865,6 @@ define half @vreduce_ord_fadd_nxv10f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv12f16(half, ) - define half @vreduce_ord_fadd_nxv12f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv12f16: ; CHECK: # %bb.0: @@ -992,8 
+919,6 @@ define half @vreduce_fadd_nxv6f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv10f16() - define half @vreduce_fmin_nxv10f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv10f16: ; CHECK: # %bb.0: @@ -1012,8 +937,6 @@ define half @vreduce_fmin_nxv10f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv12f16() - define half @vreduce_fmax_nxv12f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv12f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll index 012fca0b1fe3d..df97f19df7f99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vp.reduce.fadd.nxv1f16(half, , , i32) - define half @vpreduce_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f16: ; CHECK: # %bb.0: @@ -32,8 +30,6 @@ define half @vpreduce_ord_fadd_nxv1f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f16: ; CHECK: # %bb.0: @@ -60,8 +56,6 @@ define half @vpreduce_ord_fadd_nxv2f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f16: ; CHECK: # %bb.0: @@ -88,8 +82,6 @@ define half @vpreduce_ord_fadd_nxv4f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv64f16: ; CHECK: # %bb.0: @@ -148,8 +140,6 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, %v, , , i32) - define float @vpreduce_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f32: ; CHECK: # %bb.0: @@ -176,8 +166,6 @@ define float @vpreduce_ord_fadd_nxv1f32(float %s, %v, , , i32) - define float 
@vpreduce_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f32: ; CHECK: # %bb.0: @@ -204,8 +192,6 @@ define float @vpreduce_ord_fadd_nxv2f32(float %s, %v, , , i32) - define float @vpreduce_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f32: ; CHECK: # %bb.0: @@ -232,8 +218,6 @@ define float @vpreduce_ord_fadd_nxv4f32(float %s, %v, , , i32) - define double @vpreduce_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f64: ; CHECK: # %bb.0: @@ -260,8 +244,6 @@ define double @vpreduce_ord_fadd_nxv1f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f64: ; CHECK: # %bb.0: @@ -288,8 +270,6 @@ define double @vpreduce_ord_fadd_nxv2f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv3f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv3f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define double @vpreduce_ord_fadd_nxv3f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll index 7c6782fc1dcd4..7eea35afe0aa0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vp.reduce.add.nxv1i8(i8, , , i32) - define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_umax_nxv1i8: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define signext i8 @vpreduce_umax_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i8: ; CHECK: # %bb.0: @@ -49,8 +43,6 @@ define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define signext i8 @vpreduce_umin_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i8: ; CHECK: # %bb.0: @@ -79,8 +69,6 @@ define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i8: ; CHECK: # %bb.0: @@ -109,8 +95,6 @@ define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i8: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i8: ; CHECK: # %bb.0: @@ -139,8 +121,6 @@ define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i8: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define signext i8 @vpreduce_umax_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i8: ; CHECK: # %bb.0: @@ -169,8 +147,6 @@ define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i8: ; CHECK: # %bb.0: @@ -184,8 +160,6 @@ define signext i8 @vpreduce_umin_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i8: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i8: ; CHECK: # %bb.0: @@ -214,8 +186,6 @@ define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i8: ; CHECK: # %bb.0: @@ -229,8 +199,6 @@ define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i8: ; CHECK: # %bb.0: @@ -244,8 +212,6 @@ define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv3i8: ; CHECK: # %bb.0: @@ -259,8 +225,6 @@ define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i8: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i8: ; CHECK: # %bb.0: @@ -289,8 +251,6 @@ define signext i8 @vpreduce_umax_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 
@vpreduce_smax_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i8: ; CHECK: # %bb.0: @@ -304,8 +264,6 @@ define signext i8 @vpreduce_smax_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i8: ; CHECK: # %bb.0: @@ -319,8 +277,6 @@ define signext i8 @vpreduce_umin_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i8: ; CHECK: # %bb.0: @@ -334,8 +290,6 @@ define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i8: ; CHECK: # %bb.0: @@ -349,8 +303,6 @@ define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i8: ; CHECK: # %bb.0: @@ -364,8 +316,6 @@ define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i8: ; CHECK: # %bb.0: @@ -379,8 +329,6 @@ define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, %v, , , i32) - define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i16: ; CHECK: # %bb.0: @@ -394,8 +342,6 @@ define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i16: ; CHECK: # %bb.0: @@ -409,8 +355,6 @@ define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i16: ; CHECK: # %bb.0: @@ -424,8 +368,6 @@ define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i16: ; CHECK: # %bb.0: @@ -439,8 +381,6 @@ define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i16: ; CHECK: # %bb.0: @@ -454,8 +394,6 @@ define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i16: ; CHECK: # %bb.0: @@ -469,8 +407,6 @@ define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i16: ; CHECK: # %bb.0: @@ -484,8 +420,6 @@ define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i16: ; CHECK: # %bb.0: @@ -499,8 +433,6 @@ define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.add.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i16: ; CHECK: # %bb.0: @@ -514,8 +446,6 @@ define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv2i16(i16, , , i32) - define signext i16 
@vpreduce_umax_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i16: ; CHECK: # %bb.0: @@ -529,8 +459,6 @@ define signext i16 @vpreduce_umax_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i16: ; CHECK: # %bb.0: @@ -544,8 +472,6 @@ define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i16: ; CHECK: # %bb.0: @@ -559,8 +485,6 @@ define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i16: ; CHECK: # %bb.0: @@ -574,8 +498,6 @@ define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i16: ; CHECK: # %bb.0: @@ -589,8 +511,6 @@ define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i16: ; CHECK: # %bb.0: @@ -604,8 +524,6 @@ define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i16: ; CHECK: # %bb.0: @@ -619,8 +537,6 @@ define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 
@llvm.vp.reduce.add.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i16: ; CHECK: # %bb.0: @@ -634,8 +550,6 @@ define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i16: ; CHECK: # %bb.0: @@ -649,8 +563,6 @@ define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i16: ; CHECK: # %bb.0: @@ -664,8 +576,6 @@ define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i16: ; CHECK: # %bb.0: @@ -679,8 +589,6 @@ define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i16: ; CHECK: # %bb.0: @@ -694,8 +602,6 @@ define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i16: ; CHECK: # %bb.0: @@ -709,8 +615,6 @@ define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i16: ; CHECK: # %bb.0: @@ -724,8 +628,6 @@ define signext i16 
@vpreduce_or_nxv4i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i16: ; CHECK: # %bb.0: @@ -739,8 +641,6 @@ define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i32 @llvm.vp.reduce.add.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i32: ; CHECK: # %bb.0: @@ -754,8 +654,6 @@ define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i32: ; CHECK: # %bb.0: @@ -769,8 +667,6 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i32: ; CHECK: # %bb.0: @@ -784,8 +680,6 @@ define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i32: ; CHECK: # %bb.0: @@ -799,8 +693,6 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i32: ; CHECK: # %bb.0: @@ -814,8 +706,6 @@ define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i32: ; 
CHECK: # %bb.0: @@ -829,8 +719,6 @@ define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i32: ; CHECK: # %bb.0: @@ -844,8 +732,6 @@ define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i32: ; CHECK: # %bb.0: @@ -859,8 +745,6 @@ define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.add.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i32: ; CHECK: # %bb.0: @@ -874,8 +758,6 @@ define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i32: ; CHECK: # %bb.0: @@ -889,8 +771,6 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i32: ; CHECK: # %bb.0: @@ -904,8 +784,6 @@ define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i32: ; CHECK: # %bb.0: @@ -919,8 +797,6 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i32: ; CHECK: # %bb.0: @@ -934,8 +810,6 @@ define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i32: ; CHECK: # %bb.0: @@ -949,8 +823,6 @@ define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i32: ; CHECK: # %bb.0: @@ -964,8 +836,6 @@ define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i32: ; CHECK: # %bb.0: @@ -979,8 +849,6 @@ define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.add.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i32: ; CHECK: # %bb.0: @@ -994,8 +862,6 @@ define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i32: ; CHECK: # %bb.0: @@ -1009,8 +875,6 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv32i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv32i32: ; CHECK: # %bb.0: @@ -1040,8 +904,6 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv4i32(i32, , , i32) - define signext i32 
@vpreduce_smax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i32: ; CHECK: # %bb.0: @@ -1055,8 +917,6 @@ define signext i32 @vpreduce_smax_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i32: ; CHECK: # %bb.0: @@ -1070,8 +930,6 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i32: ; CHECK: # %bb.0: @@ -1085,8 +943,6 @@ define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i32: ; CHECK: # %bb.0: @@ -1100,8 +956,6 @@ define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i32: ; CHECK: # %bb.0: @@ -1115,8 +969,6 @@ define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i32: ; CHECK: # %bb.0: @@ -1130,8 +982,6 @@ define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i64 @llvm.vp.reduce.add.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv1i64: ; RV32: # %bb.0: @@ -1235,8 +1085,6 @@ define signext i64 @vpwreduce_uadd_nxv1i32(i64 signext %s, %v ret i64 %r } -declare i64 
@llvm.vp.reduce.umax.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv1i64: ; RV32: # %bb.0: @@ -1270,8 +1118,6 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv1i64: ; RV32: # %bb.0: @@ -1305,8 +1151,6 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv1i64: ; RV32: # %bb.0: @@ -1340,8 +1184,6 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv1i64: ; RV32: # %bb.0: @@ -1375,8 +1217,6 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv1i64: ; RV32: # %bb.0: @@ -1410,8 +1250,6 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv1i64: ; RV32: # %bb.0: @@ -1445,8 +1283,6 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv1i64: ; RV32: # %bb.0: @@ -1480,8 +1316,6 @@ define signext i64 
@vpreduce_xor_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.add.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv2i64: ; RV32: # %bb.0: @@ -1585,8 +1419,6 @@ define signext i64 @vwpreduce_uadd_nxv2i32(i64 signext %s, %v ret i64 %r } -declare i64 @llvm.vp.reduce.umax.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv2i64: ; RV32: # %bb.0: @@ -1620,8 +1452,6 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv2i64: ; RV32: # %bb.0: @@ -1655,8 +1485,6 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv2i64: ; RV32: # %bb.0: @@ -1690,8 +1518,6 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv2i64: ; RV32: # %bb.0: @@ -1725,8 +1551,6 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv2i64: ; RV32: # %bb.0: @@ -1760,8 +1584,6 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv2i64: ; 
RV32: # %bb.0: @@ -1795,8 +1617,6 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv2i64: ; RV32: # %bb.0: @@ -1830,8 +1650,6 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.add.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv4i64: ; RV32: # %bb.0: @@ -1935,8 +1753,6 @@ define signext i64 @vpwreduce_uadd_nxv4i32(i64 signext %s, %v ret i64 %r } -declare i64 @llvm.vp.reduce.umax.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv4i64: ; RV32: # %bb.0: @@ -1970,8 +1786,6 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv4i64: ; RV32: # %bb.0: @@ -2005,8 +1819,6 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv4i64: ; RV32: # %bb.0: @@ -2040,8 +1852,6 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv4i64: ; RV32: # %bb.0: @@ -2075,8 +1885,6 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, %v, %m, 
i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv4i64: ; RV32: # %bb.0: @@ -2110,8 +1918,6 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv4i64: ; RV32: # %bb.0: @@ -2145,8 +1951,6 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv4i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll index fac5e31ecf94e..d575b6c69dc3b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vector.reduce.add.nxv1i8() - define signext i8 @vreduce_add_nxv1i8( %v) { ; CHECK-LABEL: vreduce_add_nxv1i8: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define signext i8 @vreduce_add_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv1i8() - define signext i8 @vreduce_umax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i8: ; CHECK: # %bb.0: @@ -31,8 +27,6 @@ define signext i8 @vreduce_umax_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv1i8() - define signext i8 @vreduce_smax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i8: ; CHECK: # %bb.0: @@ -44,8 +38,6 @@ define signext i8 @vreduce_smax_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv1i8() - define signext i8 @vreduce_umin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i8: ; CHECK: # %bb.0: @@ -57,8 +49,6 @@ define signext i8 @vreduce_umin_nxv1i8( %v) { ret i8 %red } -declare i8 
@llvm.vector.reduce.smin.nxv1i8() - define signext i8 @vreduce_smin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i8: ; CHECK: # %bb.0: @@ -70,8 +60,6 @@ define signext i8 @vreduce_smin_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv1i8() - define signext i8 @vreduce_and_nxv1i8( %v) { ; CHECK-LABEL: vreduce_and_nxv1i8: ; CHECK: # %bb.0: @@ -83,8 +71,6 @@ define signext i8 @vreduce_and_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv1i8() - define signext i8 @vreduce_or_nxv1i8( %v) { ; CHECK-LABEL: vreduce_or_nxv1i8: ; CHECK: # %bb.0: @@ -96,8 +82,6 @@ define signext i8 @vreduce_or_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv1i8() - define signext i8 @vreduce_xor_nxv1i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i8: ; CHECK: # %bb.0: @@ -110,8 +94,6 @@ define signext i8 @vreduce_xor_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.nxv2i8() - define signext i8 @vreduce_add_nxv2i8( %v) { ; CHECK-LABEL: vreduce_add_nxv2i8: ; CHECK: # %bb.0: @@ -124,8 +106,6 @@ define signext i8 @vreduce_add_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv2i8() - define signext i8 @vreduce_umax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i8: ; CHECK: # %bb.0: @@ -137,8 +117,6 @@ define signext i8 @vreduce_umax_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv2i8() - define signext i8 @vreduce_smax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +128,6 @@ define signext i8 @vreduce_smax_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv2i8() - define signext i8 @vreduce_umin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i8: ; CHECK: # %bb.0: @@ -163,8 +139,6 @@ define signext i8 @vreduce_umin_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.nxv2i8() - define signext i8 @vreduce_smin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: @@ -176,8 +150,6 @@ define signext i8 @vreduce_smin_nxv2i8( %v) { 
ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv2i8() - define signext i8 @vreduce_and_nxv2i8( %v) { ; CHECK-LABEL: vreduce_and_nxv2i8: ; CHECK: # %bb.0: @@ -189,8 +161,6 @@ define signext i8 @vreduce_and_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv2i8() - define signext i8 @vreduce_or_nxv2i8( %v) { ; CHECK-LABEL: vreduce_or_nxv2i8: ; CHECK: # %bb.0: @@ -202,8 +172,6 @@ define signext i8 @vreduce_or_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv2i8() - define signext i8 @vreduce_xor_nxv2i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i8: ; CHECK: # %bb.0: @@ -216,8 +184,6 @@ define signext i8 @vreduce_xor_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.nxv4i8() - define signext i8 @vreduce_add_nxv4i8( %v) { ; CHECK-LABEL: vreduce_add_nxv4i8: ; CHECK: # %bb.0: @@ -230,8 +196,6 @@ define signext i8 @vreduce_add_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv4i8() - define signext i8 @vreduce_umax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i8: ; CHECK: # %bb.0: @@ -243,8 +207,6 @@ define signext i8 @vreduce_umax_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv4i8() - define signext i8 @vreduce_smax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: @@ -256,8 +218,6 @@ define signext i8 @vreduce_smax_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv4i8() - define signext i8 @vreduce_umin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i8: ; CHECK: # %bb.0: @@ -269,8 +229,6 @@ define signext i8 @vreduce_umin_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.nxv4i8() - define signext i8 @vreduce_smin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: @@ -282,8 +240,6 @@ define signext i8 @vreduce_smin_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv4i8() - define signext i8 @vreduce_and_nxv4i8( %v) { ; CHECK-LABEL: vreduce_and_nxv4i8: ; CHECK: # %bb.0: @@ -295,8 +251,6 @@ define signext 
i8 @vreduce_and_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv4i8() - define signext i8 @vreduce_or_nxv4i8( %v) { ; CHECK-LABEL: vreduce_or_nxv4i8: ; CHECK: # %bb.0: @@ -308,8 +262,6 @@ define signext i8 @vreduce_or_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv4i8() - define signext i8 @vreduce_xor_nxv4i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i8: ; CHECK: # %bb.0: @@ -322,8 +274,6 @@ define signext i8 @vreduce_xor_nxv4i8( %v) { ret i8 %red } -declare i16 @llvm.vector.reduce.add.nxv1i16() - define signext i16 @vreduce_add_nxv1i16( %v) { ; CHECK-LABEL: vreduce_add_nxv1i16: ; CHECK: # %bb.0: @@ -366,8 +316,6 @@ define signext i16 @vwreduce_uadd_nxv1i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv1i16() - define signext i16 @vreduce_umax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i16: ; CHECK: # %bb.0: @@ -379,8 +327,6 @@ define signext i16 @vreduce_umax_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv1i16() - define signext i16 @vreduce_smax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i16: ; CHECK: # %bb.0: @@ -392,8 +338,6 @@ define signext i16 @vreduce_smax_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv1i16() - define signext i16 @vreduce_umin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i16: ; CHECK: # %bb.0: @@ -405,8 +349,6 @@ define signext i16 @vreduce_umin_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv1i16() - define signext i16 @vreduce_smin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i16: ; CHECK: # %bb.0: @@ -418,8 +360,6 @@ define signext i16 @vreduce_smin_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv1i16() - define signext i16 @vreduce_and_nxv1i16( %v) { ; CHECK-LABEL: vreduce_and_nxv1i16: ; CHECK: # %bb.0: @@ -431,8 +371,6 @@ define signext i16 @vreduce_and_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv1i16() - define signext i16 @vreduce_or_nxv1i16( %v) { ; 
CHECK-LABEL: vreduce_or_nxv1i16: ; CHECK: # %bb.0: @@ -444,8 +382,6 @@ define signext i16 @vreduce_or_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.nxv1i16() - define signext i16 @vreduce_xor_nxv1i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i16: ; CHECK: # %bb.0: @@ -458,8 +394,6 @@ define signext i16 @vreduce_xor_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.nxv2i16() - define signext i16 @vreduce_add_nxv2i16( %v) { ; CHECK-LABEL: vreduce_add_nxv2i16: ; CHECK: # %bb.0: @@ -502,8 +436,6 @@ define signext i16 @vwreduce_uadd_nxv2i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv2i16() - define signext i16 @vreduce_umax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i16: ; CHECK: # %bb.0: @@ -515,8 +447,6 @@ define signext i16 @vreduce_umax_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv2i16() - define signext i16 @vreduce_smax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i16: ; CHECK: # %bb.0: @@ -528,8 +458,6 @@ define signext i16 @vreduce_smax_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv2i16() - define signext i16 @vreduce_umin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i16: ; CHECK: # %bb.0: @@ -541,8 +469,6 @@ define signext i16 @vreduce_umin_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv2i16() - define signext i16 @vreduce_smin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i16: ; CHECK: # %bb.0: @@ -554,8 +480,6 @@ define signext i16 @vreduce_smin_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv2i16() - define signext i16 @vreduce_and_nxv2i16( %v) { ; CHECK-LABEL: vreduce_and_nxv2i16: ; CHECK: # %bb.0: @@ -567,8 +491,6 @@ define signext i16 @vreduce_and_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv2i16() - define signext i16 @vreduce_or_nxv2i16( %v) { ; CHECK-LABEL: vreduce_or_nxv2i16: ; CHECK: # %bb.0: @@ -580,8 +502,6 @@ define signext i16 @vreduce_or_nxv2i16( %v) { ret i16 %red 
} -declare i16 @llvm.vector.reduce.xor.nxv2i16() - define signext i16 @vreduce_xor_nxv2i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i16: ; CHECK: # %bb.0: @@ -594,8 +514,6 @@ define signext i16 @vreduce_xor_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.nxv4i16() - define signext i16 @vreduce_add_nxv4i16( %v) { ; CHECK-LABEL: vreduce_add_nxv4i16: ; CHECK: # %bb.0: @@ -638,8 +556,6 @@ define signext i16 @vwreduce_uadd_nxv4i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv4i16() - define signext i16 @vreduce_umax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i16: ; CHECK: # %bb.0: @@ -651,8 +567,6 @@ define signext i16 @vreduce_umax_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv4i16() - define signext i16 @vreduce_smax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i16: ; CHECK: # %bb.0: @@ -664,8 +578,6 @@ define signext i16 @vreduce_smax_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv4i16() - define signext i16 @vreduce_umin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i16: ; CHECK: # %bb.0: @@ -677,8 +589,6 @@ define signext i16 @vreduce_umin_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv4i16() - define signext i16 @vreduce_smin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i16: ; CHECK: # %bb.0: @@ -690,8 +600,6 @@ define signext i16 @vreduce_smin_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv4i16() - define signext i16 @vreduce_and_nxv4i16( %v) { ; CHECK-LABEL: vreduce_and_nxv4i16: ; CHECK: # %bb.0: @@ -703,8 +611,6 @@ define signext i16 @vreduce_and_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv4i16() - define signext i16 @vreduce_or_nxv4i16( %v) { ; CHECK-LABEL: vreduce_or_nxv4i16: ; CHECK: # %bb.0: @@ -716,8 +622,6 @@ define signext i16 @vreduce_or_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.nxv4i16() - define signext i16 @vreduce_xor_nxv4i16( %v) { ; CHECK-LABEL: 
vreduce_xor_nxv4i16: ; CHECK: # %bb.0: @@ -730,8 +634,6 @@ define signext i16 @vreduce_xor_nxv4i16( %v) { ret i16 %red } -declare i32 @llvm.vector.reduce.add.nxv1i32() - define signext i32 @vreduce_add_nxv1i32( %v) { ; CHECK-LABEL: vreduce_add_nxv1i32: ; CHECK: # %bb.0: @@ -774,8 +676,6 @@ define signext i32 @vwreduce_uadd_nxv1i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv1i32() - define signext i32 @vreduce_umax_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i32: ; CHECK: # %bb.0: @@ -787,8 +687,6 @@ define signext i32 @vreduce_umax_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv1i32() - define signext i32 @vreduce_smax_nxv1i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i32: ; CHECK: # %bb.0: @@ -800,8 +698,6 @@ define signext i32 @vreduce_smax_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv1i32() - define signext i32 @vreduce_umin_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i32: ; CHECK: # %bb.0: @@ -813,8 +709,6 @@ define signext i32 @vreduce_umin_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv1i32() - define signext i32 @vreduce_smin_nxv1i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i32: ; CHECK: # %bb.0: @@ -826,8 +720,6 @@ define signext i32 @vreduce_smin_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv1i32() - define signext i32 @vreduce_and_nxv1i32( %v) { ; CHECK-LABEL: vreduce_and_nxv1i32: ; CHECK: # %bb.0: @@ -839,8 +731,6 @@ define signext i32 @vreduce_and_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv1i32() - define signext i32 @vreduce_or_nxv1i32( %v) { ; CHECK-LABEL: vreduce_or_nxv1i32: ; CHECK: # %bb.0: @@ -852,8 +742,6 @@ define signext i32 @vreduce_or_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv1i32() - define signext i32 @vreduce_xor_nxv1i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i32: ; CHECK: # %bb.0: @@ -866,8 +754,6 @@ define signext i32 @vreduce_xor_nxv1i32( %v) { ret i32 %red } -declare 
i32 @llvm.vector.reduce.add.nxv2i32() - define signext i32 @vreduce_add_nxv2i32( %v) { ; CHECK-LABEL: vreduce_add_nxv2i32: ; CHECK: # %bb.0: @@ -910,8 +796,6 @@ define signext i32 @vwreduce_uadd_nxv2i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv2i32() - define signext i32 @vreduce_umax_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i32: ; CHECK: # %bb.0: @@ -923,8 +807,6 @@ define signext i32 @vreduce_umax_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv2i32() - define signext i32 @vreduce_smax_nxv2i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i32: ; CHECK: # %bb.0: @@ -936,8 +818,6 @@ define signext i32 @vreduce_smax_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv2i32() - define signext i32 @vreduce_umin_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i32: ; CHECK: # %bb.0: @@ -949,8 +829,6 @@ define signext i32 @vreduce_umin_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv2i32() - define signext i32 @vreduce_smin_nxv2i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i32: ; CHECK: # %bb.0: @@ -962,8 +840,6 @@ define signext i32 @vreduce_smin_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv2i32() - define signext i32 @vreduce_and_nxv2i32( %v) { ; CHECK-LABEL: vreduce_and_nxv2i32: ; CHECK: # %bb.0: @@ -975,8 +851,6 @@ define signext i32 @vreduce_and_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv2i32() - define signext i32 @vreduce_or_nxv2i32( %v) { ; CHECK-LABEL: vreduce_or_nxv2i32: ; CHECK: # %bb.0: @@ -988,8 +862,6 @@ define signext i32 @vreduce_or_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv2i32() - define signext i32 @vreduce_xor_nxv2i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i32: ; CHECK: # %bb.0: @@ -1002,8 +874,6 @@ define signext i32 @vreduce_xor_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.nxv4i32() - define signext i32 @vreduce_add_nxv4i32( %v) { ; CHECK-LABEL: vreduce_add_nxv4i32: ; 
CHECK: # %bb.0: @@ -1046,8 +916,6 @@ define signext i32 @vwreduce_uadd_nxv4i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv4i32() - define signext i32 @vreduce_umax_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i32: ; CHECK: # %bb.0: @@ -1059,8 +927,6 @@ define signext i32 @vreduce_umax_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv4i32() - define signext i32 @vreduce_smax_nxv4i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i32: ; CHECK: # %bb.0: @@ -1072,8 +938,6 @@ define signext i32 @vreduce_smax_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv4i32() - define signext i32 @vreduce_umin_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i32: ; CHECK: # %bb.0: @@ -1085,8 +949,6 @@ define signext i32 @vreduce_umin_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv4i32() - define signext i32 @vreduce_smin_nxv4i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i32: ; CHECK: # %bb.0: @@ -1098,8 +960,6 @@ define signext i32 @vreduce_smin_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv4i32() - define signext i32 @vreduce_and_nxv4i32( %v) { ; CHECK-LABEL: vreduce_and_nxv4i32: ; CHECK: # %bb.0: @@ -1111,8 +971,6 @@ define signext i32 @vreduce_and_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv4i32() - define signext i32 @vreduce_or_nxv4i32( %v) { ; CHECK-LABEL: vreduce_or_nxv4i32: ; CHECK: # %bb.0: @@ -1124,8 +982,6 @@ define signext i32 @vreduce_or_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv4i32() - define signext i32 @vreduce_xor_nxv4i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i32: ; CHECK: # %bb.0: @@ -1138,8 +994,6 @@ define signext i32 @vreduce_xor_nxv4i32( %v) { ret i32 %red } -declare i64 @llvm.vector.reduce.add.nxv1i64() - define i64 @vreduce_add_nxv1i64( %v) { ; RV32-LABEL: vreduce_add_nxv1i64: ; RV32: # %bb.0: @@ -1220,8 +1074,6 @@ define i64 @vwreduce_uadd_nxv1i32( %v) { ret i64 %red } -declare i64 
@llvm.vector.reduce.umax.nxv1i64() - define i64 @vreduce_umax_nxv1i64( %v) { ; RV32-LABEL: vreduce_umax_nxv1i64: ; RV32: # %bb.0: @@ -1244,8 +1096,6 @@ define i64 @vreduce_umax_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv1i64() - define i64 @vreduce_smax_nxv1i64( %v) { ; RV32-LABEL: vreduce_smax_nxv1i64: ; RV32: # %bb.0: @@ -1268,8 +1118,6 @@ define i64 @vreduce_smax_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv1i64() - define i64 @vreduce_umin_nxv1i64( %v) { ; RV32-LABEL: vreduce_umin_nxv1i64: ; RV32: # %bb.0: @@ -1292,8 +1140,6 @@ define i64 @vreduce_umin_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv1i64() - define i64 @vreduce_smin_nxv1i64( %v) { ; RV32-LABEL: vreduce_smin_nxv1i64: ; RV32: # %bb.0: @@ -1316,8 +1162,6 @@ define i64 @vreduce_smin_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv1i64() - define i64 @vreduce_and_nxv1i64( %v) { ; RV32-LABEL: vreduce_and_nxv1i64: ; RV32: # %bb.0: @@ -1340,8 +1184,6 @@ define i64 @vreduce_and_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv1i64() - define i64 @vreduce_or_nxv1i64( %v) { ; RV32-LABEL: vreduce_or_nxv1i64: ; RV32: # %bb.0: @@ -1364,8 +1206,6 @@ define i64 @vreduce_or_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv1i64() - define i64 @vreduce_xor_nxv1i64( %v) { ; RV32-LABEL: vreduce_xor_nxv1i64: ; RV32: # %bb.0: @@ -1390,8 +1230,6 @@ define i64 @vreduce_xor_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.nxv2i64() - define i64 @vreduce_add_nxv2i64( %v) { ; RV32-LABEL: vreduce_add_nxv2i64: ; RV32: # %bb.0: @@ -1472,8 +1310,6 @@ define i64 @vwreduce_uadd_nxv2i32( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.nxv2i64() - define i64 @vreduce_umax_nxv2i64( %v) { ; RV32-LABEL: vreduce_umax_nxv2i64: ; RV32: # %bb.0: @@ -1496,8 +1332,6 @@ define i64 @vreduce_umax_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv2i64() - 
define i64 @vreduce_smax_nxv2i64( %v) { ; RV32-LABEL: vreduce_smax_nxv2i64: ; RV32: # %bb.0: @@ -1520,8 +1354,6 @@ define i64 @vreduce_smax_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv2i64() - define i64 @vreduce_umin_nxv2i64( %v) { ; RV32-LABEL: vreduce_umin_nxv2i64: ; RV32: # %bb.0: @@ -1544,8 +1376,6 @@ define i64 @vreduce_umin_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv2i64() - define i64 @vreduce_smin_nxv2i64( %v) { ; RV32-LABEL: vreduce_smin_nxv2i64: ; RV32: # %bb.0: @@ -1568,8 +1398,6 @@ define i64 @vreduce_smin_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv2i64() - define i64 @vreduce_and_nxv2i64( %v) { ; RV32-LABEL: vreduce_and_nxv2i64: ; RV32: # %bb.0: @@ -1592,8 +1420,6 @@ define i64 @vreduce_and_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv2i64() - define i64 @vreduce_or_nxv2i64( %v) { ; RV32-LABEL: vreduce_or_nxv2i64: ; RV32: # %bb.0: @@ -1616,8 +1442,6 @@ define i64 @vreduce_or_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv2i64() - define i64 @vreduce_xor_nxv2i64( %v) { ; RV32-LABEL: vreduce_xor_nxv2i64: ; RV32: # %bb.0: @@ -1642,8 +1466,6 @@ define i64 @vreduce_xor_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.nxv4i64() - define i64 @vreduce_add_nxv4i64( %v) { ; RV32-LABEL: vreduce_add_nxv4i64: ; RV32: # %bb.0: @@ -1724,8 +1546,6 @@ define i64 @vwreduce_uadd_nxv4i32( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.nxv4i64() - define i64 @vreduce_umax_nxv4i64( %v) { ; RV32-LABEL: vreduce_umax_nxv4i64: ; RV32: # %bb.0: @@ -1748,8 +1568,6 @@ define i64 @vreduce_umax_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv4i64() - define i64 @vreduce_smax_nxv4i64( %v) { ; RV32-LABEL: vreduce_smax_nxv4i64: ; RV32: # %bb.0: @@ -1772,8 +1590,6 @@ define i64 @vreduce_smax_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv4i64() - define i64 @vreduce_umin_nxv4i64( %v) 
{ ; RV32-LABEL: vreduce_umin_nxv4i64: ; RV32: # %bb.0: @@ -1796,8 +1612,6 @@ define i64 @vreduce_umin_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv4i64() - define i64 @vreduce_smin_nxv4i64( %v) { ; RV32-LABEL: vreduce_smin_nxv4i64: ; RV32: # %bb.0: @@ -1820,8 +1634,6 @@ define i64 @vreduce_smin_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv4i64() - define i64 @vreduce_and_nxv4i64( %v) { ; RV32-LABEL: vreduce_and_nxv4i64: ; RV32: # %bb.0: @@ -1844,8 +1656,6 @@ define i64 @vreduce_and_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv4i64() - define i64 @vreduce_or_nxv4i64( %v) { ; RV32-LABEL: vreduce_or_nxv4i64: ; RV32: # %bb.0: @@ -1868,8 +1678,6 @@ define i64 @vreduce_or_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv4i64() - define i64 @vreduce_xor_nxv4i64( %v) { ; RV32-LABEL: vreduce_xor_nxv4i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll index 18d20f66987b2..1e629e9d20530 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vp.reduce.and.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_and_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i1: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define zeroext i1 @vpreduce_and_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i1: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i1: ; CHECK: # 
%bb.0: @@ -50,8 +44,6 @@ define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i1: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define zeroext i1 @vpreduce_and_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i1: ; CHECK: # %bb.0: @@ -82,8 +72,6 @@ define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i1: ; CHECK: # %bb.0: @@ -98,8 +86,6 @@ define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i1: ; CHECK: # %bb.0: @@ -114,8 +100,6 @@ define zeroext i1 @vpreduce_and_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i1: ; CHECK: # %bb.0: @@ -130,8 +114,6 @@ define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i1: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv8i1: ; CHECK: # %bb.0: @@ -162,8 +142,6 @@ define zeroext i1 @vpreduce_and_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv8i1: ; CHECK: # %bb.0: @@ -178,8 +156,6 @@ define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv8i1: ; 
CHECK: # %bb.0: @@ -194,8 +170,6 @@ define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv16i1: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define zeroext i1 @vpreduce_and_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv16i1: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv16i1: ; CHECK: # %bb.0: @@ -242,8 +212,6 @@ define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv32i1: ; CHECK: # %bb.0: @@ -258,8 +226,6 @@ define zeroext i1 @vpreduce_and_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv32i1: ; CHECK: # %bb.0: @@ -274,8 +240,6 @@ define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv32i1: ; CHECK: # %bb.0: @@ -290,8 +254,6 @@ define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv40i1: ; CHECK: # %bb.0: @@ -306,8 +268,6 @@ define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv64i1: ; CHECK: # %bb.0: @@ -322,8 +282,6 @@ define zeroext i1 @vpreduce_and_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vpreduce_or_nxv64i1: ; CHECK: # %bb.0: @@ -338,8 +296,6 @@ define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv64i1: ; CHECK: # %bb.0: @@ -354,8 +310,6 @@ define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv128i1: ; CHECK: # %bb.0: @@ -386,8 +340,6 @@ define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.add.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i1: ; CHECK: # %bb.0: @@ -402,8 +354,6 @@ define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i1: ; CHECK: # %bb.0: @@ -418,8 +368,6 @@ define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i1: ; CHECK: # %bb.0: @@ -434,8 +382,6 @@ define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv8i1: ; CHECK: # %bb.0: @@ -450,8 +396,6 @@ define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv16i1: ; CHECK: # %bb.0: @@ -466,8 +410,6 @@ define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv32i1: ; CHECK: # %bb.0: @@ -482,8 +424,6 @@ define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, 
%v, , , i32) - define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv64i1: ; CHECK: # %bb.0: @@ -498,9 +438,6 @@ define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i1: ; CHECK: # %bb.0: @@ -515,8 +452,6 @@ define zeroext i1 @vpreduce_smax_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i1: ; CHECK: # %bb.0: @@ -531,8 +466,6 @@ define zeroext i1 @vpreduce_smax_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i1: ; CHECK: # %bb.0: @@ -547,8 +480,6 @@ define zeroext i1 @vpreduce_smax_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv8i1: ; CHECK: # %bb.0: @@ -563,8 +494,6 @@ define zeroext i1 @vpreduce_smax_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv16i1: ; CHECK: # %bb.0: @@ -579,8 +508,6 @@ define zeroext i1 @vpreduce_smax_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_smax_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv32i1: ; CHECK: # %bb.0: @@ -595,8 +522,6 @@ define zeroext i1 @vpreduce_smax_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_smax_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv64i1: ; CHECK: # %bb.0: @@ -611,8 +536,6 @@ define zeroext i1 @vpreduce_smax_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 
@llvm.vp.reduce.smin.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i1: ; CHECK: # %bb.0: @@ -627,8 +550,6 @@ define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i1: ; CHECK: # %bb.0: @@ -643,8 +564,6 @@ define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i1: ; CHECK: # %bb.0: @@ -659,8 +578,6 @@ define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv8i1: ; CHECK: # %bb.0: @@ -675,8 +592,6 @@ define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv16i1: ; CHECK: # %bb.0: @@ -691,8 +606,6 @@ define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv32i1: ; CHECK: # %bb.0: @@ -707,8 +620,6 @@ define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv64i1: ; CHECK: # %bb.0: @@ -723,8 +634,6 @@ define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i1: ; CHECK: # %bb.0: @@ -739,8 +648,6 @@ define zeroext i1 
@vpreduce_umax_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i1: ; CHECK: # %bb.0: @@ -755,8 +662,6 @@ define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i1: ; CHECK: # %bb.0: @@ -771,8 +676,6 @@ define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv8i1: ; CHECK: # %bb.0: @@ -787,8 +690,6 @@ define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv16i1: ; CHECK: # %bb.0: @@ -803,8 +704,6 @@ define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv32i1: ; CHECK: # %bb.0: @@ -819,8 +718,6 @@ define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv64i1: ; CHECK: # %bb.0: @@ -835,8 +732,6 @@ define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i1: ; CHECK: # %bb.0: @@ -851,8 +746,6 @@ define zeroext i1 @vpreduce_umin_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i1: ; CHECK: # %bb.0: @@ -867,8 +760,6 @@ define 
zeroext i1 @vpreduce_umin_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i1: ; CHECK: # %bb.0: @@ -883,8 +774,6 @@ define zeroext i1 @vpreduce_umin_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv8i1: ; CHECK: # %bb.0: @@ -899,8 +788,6 @@ define zeroext i1 @vpreduce_umin_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv16i1: ; CHECK: # %bb.0: @@ -915,8 +802,6 @@ define zeroext i1 @vpreduce_umin_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv32i1: ; CHECK: # %bb.0: @@ -931,8 +816,6 @@ define zeroext i1 @vpreduce_umin_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv64i1: ; CHECK: # %bb.0: @@ -947,8 +830,6 @@ define zeroext i1 @vpreduce_umin_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.mul.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_mul_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv1i1: ; CHECK: # %bb.0: @@ -963,8 +844,6 @@ define zeroext i1 @vpreduce_mul_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv2i1: ; CHECK: # %bb.0: @@ -979,8 +858,6 @@ define zeroext i1 @vpreduce_mul_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv4i1: ; CHECK: # %bb.0: @@ -995,8 +872,6 @@ define 
zeroext i1 @vpreduce_mul_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv8i1: ; CHECK: # %bb.0: @@ -1011,8 +886,6 @@ define zeroext i1 @vpreduce_mul_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv16i1: ; CHECK: # %bb.0: @@ -1027,8 +900,6 @@ define zeroext i1 @vpreduce_mul_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv32i1: ; CHECK: # %bb.0: @@ -1043,8 +914,6 @@ define zeroext i1 @vpreduce_mul_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll index ce9d6c5ab91a8..cc829b32e12e9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vector.reduce.or.nxv1i1() - define zeroext i1 @vreduce_or_nxv1i1( %v) { ; CHECK-LABEL: vreduce_or_nxv1i1: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define zeroext i1 @vreduce_or_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv1i1() - define zeroext i1 @vreduce_xor_nxv1i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define zeroext i1 @vreduce_xor_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv1i1() - define zeroext i1 @vreduce_and_nxv1i1( %v) { ; CHECK-LABEL: vreduce_and_nxv1i1: ; CHECK: # %bb.0: @@ -42,8 +36,6 @@ define zeroext i1 @vreduce_and_nxv1i1( %v) { ret i1 %red } -declare 
i1 @llvm.vector.reduce.umax.nxv1i1() - define zeroext i1 @vreduce_umax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i1: ; CHECK: # %bb.0: @@ -55,8 +47,6 @@ define zeroext i1 @vreduce_umax_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv1i1() - define zeroext i1 @vreduce_smax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i1: ; CHECK: # %bb.0: @@ -69,8 +59,6 @@ define zeroext i1 @vreduce_smax_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv1i1() - define zeroext i1 @vreduce_umin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i1: ; CHECK: # %bb.0: @@ -83,8 +71,6 @@ define zeroext i1 @vreduce_umin_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv1i1() - define zeroext i1 @vreduce_smin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i1: ; CHECK: # %bb.0: @@ -96,8 +82,6 @@ define zeroext i1 @vreduce_smin_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv2i1() - define zeroext i1 @vreduce_or_nxv2i1( %v) { ; CHECK-LABEL: vreduce_or_nxv2i1: ; CHECK: # %bb.0: @@ -109,8 +93,6 @@ define zeroext i1 @vreduce_or_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv2i1() - define zeroext i1 @vreduce_xor_nxv2i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i1: ; CHECK: # %bb.0: @@ -122,8 +104,6 @@ define zeroext i1 @vreduce_xor_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv2i1() - define zeroext i1 @vreduce_and_nxv2i1( %v) { ; CHECK-LABEL: vreduce_and_nxv2i1: ; CHECK: # %bb.0: @@ -136,8 +116,6 @@ define zeroext i1 @vreduce_and_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv2i1() - define zeroext i1 @vreduce_umax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i1: ; CHECK: # %bb.0: @@ -149,8 +127,6 @@ define zeroext i1 @vreduce_umax_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv2i1() - define zeroext i1 @vreduce_smax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i1: ; CHECK: # %bb.0: @@ -163,8 +139,6 @@ define zeroext i1 @vreduce_smax_nxv2i1( 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv2i1() - define zeroext i1 @vreduce_umin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i1: ; CHECK: # %bb.0: @@ -177,8 +151,6 @@ define zeroext i1 @vreduce_umin_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv2i1() - define zeroext i1 @vreduce_smin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i1: ; CHECK: # %bb.0: @@ -190,8 +162,6 @@ define zeroext i1 @vreduce_smin_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv4i1() - define zeroext i1 @vreduce_or_nxv4i1( %v) { ; CHECK-LABEL: vreduce_or_nxv4i1: ; CHECK: # %bb.0: @@ -203,8 +173,6 @@ define zeroext i1 @vreduce_or_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv4i1() - define zeroext i1 @vreduce_xor_nxv4i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i1: ; CHECK: # %bb.0: @@ -216,8 +184,6 @@ define zeroext i1 @vreduce_xor_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv4i1() - define zeroext i1 @vreduce_and_nxv4i1( %v) { ; CHECK-LABEL: vreduce_and_nxv4i1: ; CHECK: # %bb.0: @@ -230,8 +196,6 @@ define zeroext i1 @vreduce_and_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv4i1() - define zeroext i1 @vreduce_umax_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i1: ; CHECK: # %bb.0: @@ -243,8 +207,6 @@ define zeroext i1 @vreduce_umax_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv4i1() - define zeroext i1 @vreduce_smax_nxv4i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i1: ; CHECK: # %bb.0: @@ -257,8 +219,6 @@ define zeroext i1 @vreduce_smax_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv4i1() - define zeroext i1 @vreduce_umin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i1: ; CHECK: # %bb.0: @@ -271,8 +231,6 @@ define zeroext i1 @vreduce_umin_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv4i1() - define zeroext i1 @vreduce_smin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i1: ; CHECK: # %bb.0: @@ -284,8 +242,6 @@ 
define zeroext i1 @vreduce_smin_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv8i1() - define zeroext i1 @vreduce_or_nxv8i1( %v) { ; CHECK-LABEL: vreduce_or_nxv8i1: ; CHECK: # %bb.0: @@ -297,8 +253,6 @@ define zeroext i1 @vreduce_or_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv8i1() - define zeroext i1 @vreduce_xor_nxv8i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv8i1: ; CHECK: # %bb.0: @@ -310,8 +264,6 @@ define zeroext i1 @vreduce_xor_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv8i1() - define zeroext i1 @vreduce_and_nxv8i1( %v) { ; CHECK-LABEL: vreduce_and_nxv8i1: ; CHECK: # %bb.0: @@ -324,8 +276,6 @@ define zeroext i1 @vreduce_and_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv8i1() - define zeroext i1 @vreduce_umax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv8i1: ; CHECK: # %bb.0: @@ -337,8 +287,6 @@ define zeroext i1 @vreduce_umax_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv8i1() - define zeroext i1 @vreduce_smax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv8i1: ; CHECK: # %bb.0: @@ -351,8 +299,6 @@ define zeroext i1 @vreduce_smax_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv8i1() - define zeroext i1 @vreduce_umin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv8i1: ; CHECK: # %bb.0: @@ -365,8 +311,6 @@ define zeroext i1 @vreduce_umin_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv8i1() - define zeroext i1 @vreduce_smin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv8i1: ; CHECK: # %bb.0: @@ -378,8 +322,6 @@ define zeroext i1 @vreduce_smin_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv16i1() - define zeroext i1 @vreduce_or_nxv16i1( %v) { ; CHECK-LABEL: vreduce_or_nxv16i1: ; CHECK: # %bb.0: @@ -391,8 +333,6 @@ define zeroext i1 @vreduce_or_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv16i1() - define zeroext i1 @vreduce_xor_nxv16i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv16i1: ; 
CHECK: # %bb.0: @@ -404,8 +344,6 @@ define zeroext i1 @vreduce_xor_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv16i1() - define zeroext i1 @vreduce_and_nxv16i1( %v) { ; CHECK-LABEL: vreduce_and_nxv16i1: ; CHECK: # %bb.0: @@ -418,8 +356,6 @@ define zeroext i1 @vreduce_and_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv16i1() - define zeroext i1 @vreduce_umax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv16i1: ; CHECK: # %bb.0: @@ -431,8 +367,6 @@ define zeroext i1 @vreduce_umax_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv16i1() - define zeroext i1 @vreduce_smax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv16i1: ; CHECK: # %bb.0: @@ -445,8 +379,6 @@ define zeroext i1 @vreduce_smax_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv16i1() - define zeroext i1 @vreduce_umin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv16i1: ; CHECK: # %bb.0: @@ -459,8 +391,6 @@ define zeroext i1 @vreduce_umin_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv16i1() - define zeroext i1 @vreduce_smin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv16i1: ; CHECK: # %bb.0: @@ -472,8 +402,6 @@ define zeroext i1 @vreduce_smin_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv32i1() - define zeroext i1 @vreduce_or_nxv32i1( %v) { ; CHECK-LABEL: vreduce_or_nxv32i1: ; CHECK: # %bb.0: @@ -485,8 +413,6 @@ define zeroext i1 @vreduce_or_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv32i1() - define zeroext i1 @vreduce_xor_nxv32i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv32i1: ; CHECK: # %bb.0: @@ -498,8 +424,6 @@ define zeroext i1 @vreduce_xor_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv32i1() - define zeroext i1 @vreduce_and_nxv32i1( %v) { ; CHECK-LABEL: vreduce_and_nxv32i1: ; CHECK: # %bb.0: @@ -512,8 +436,6 @@ define zeroext i1 @vreduce_and_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv32i1() - define zeroext i1 
@vreduce_umax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv32i1: ; CHECK: # %bb.0: @@ -525,8 +447,6 @@ define zeroext i1 @vreduce_umax_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv32i1() - define zeroext i1 @vreduce_smax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv32i1: ; CHECK: # %bb.0: @@ -539,8 +459,6 @@ define zeroext i1 @vreduce_smax_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv32i1() - define zeroext i1 @vreduce_umin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv32i1: ; CHECK: # %bb.0: @@ -553,8 +471,6 @@ define zeroext i1 @vreduce_umin_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv32i1() - define zeroext i1 @vreduce_smin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv32i1: ; CHECK: # %bb.0: @@ -566,8 +482,6 @@ define zeroext i1 @vreduce_smin_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv64i1() - define zeroext i1 @vreduce_or_nxv64i1( %v) { ; CHECK-LABEL: vreduce_or_nxv64i1: ; CHECK: # %bb.0: @@ -579,8 +493,6 @@ define zeroext i1 @vreduce_or_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv64i1() - define zeroext i1 @vreduce_xor_nxv64i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv64i1: ; CHECK: # %bb.0: @@ -592,8 +504,6 @@ define zeroext i1 @vreduce_xor_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv64i1() - define zeroext i1 @vreduce_and_nxv64i1( %v) { ; CHECK-LABEL: vreduce_and_nxv64i1: ; CHECK: # %bb.0: @@ -606,8 +516,6 @@ define zeroext i1 @vreduce_and_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv64i1() - define zeroext i1 @vreduce_umax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv64i1: ; CHECK: # %bb.0: @@ -619,8 +527,6 @@ define zeroext i1 @vreduce_umax_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv64i1() - define zeroext i1 @vreduce_smax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv64i1: ; CHECK: # %bb.0: @@ -633,8 +539,6 @@ define zeroext i1 @vreduce_smax_nxv64i1( %v) { ret i1 
%red } -declare i1 @llvm.vector.reduce.umin.nxv64i1() - define zeroext i1 @vreduce_umin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv64i1: ; CHECK: # %bb.0: @@ -647,8 +551,6 @@ define zeroext i1 @vreduce_umin_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv64i1() - define zeroext i1 @vreduce_smin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv64i1: ; CHECK: # %bb.0: @@ -660,8 +562,6 @@ define zeroext i1 @vreduce_smin_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv1i1() - define zeroext i1 @vreduce_add_nxv1i1( %v) { ; CHECK-LABEL: vreduce_add_nxv1i1: ; CHECK: # %bb.0: @@ -673,8 +573,6 @@ define zeroext i1 @vreduce_add_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv2i1() - define zeroext i1 @vreduce_add_nxv2i1( %v) { ; CHECK-LABEL: vreduce_add_nxv2i1: ; CHECK: # %bb.0: @@ -686,8 +584,6 @@ define zeroext i1 @vreduce_add_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv4i1() - define zeroext i1 @vreduce_add_nxv4i1( %v) { ; CHECK-LABEL: vreduce_add_nxv4i1: ; CHECK: # %bb.0: @@ -699,8 +595,6 @@ define zeroext i1 @vreduce_add_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv8i1() - define zeroext i1 @vreduce_add_nxv8i1( %v) { ; CHECK-LABEL: vreduce_add_nxv8i1: ; CHECK: # %bb.0: @@ -712,8 +606,6 @@ define zeroext i1 @vreduce_add_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv16i1() - define zeroext i1 @vreduce_add_nxv16i1( %v) { ; CHECK-LABEL: vreduce_add_nxv16i1: ; CHECK: # %bb.0: @@ -725,8 +617,6 @@ define zeroext i1 @vreduce_add_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv32i1() - define zeroext i1 @vreduce_add_nxv32i1( %v) { ; CHECK-LABEL: vreduce_add_nxv32i1: ; CHECK: # %bb.0: @@ -738,8 +628,6 @@ define zeroext i1 @vreduce_add_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv64i1() - define zeroext i1 @vreduce_add_nxv64i1( %v) { ; CHECK-LABEL: vreduce_add_nxv64i1: ; CHECK: # %bb.0: @@ -751,8 +639,6 @@ define 
zeroext i1 @vreduce_add_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv128i1() - define zeroext i1 @vreduce_or_nxv128i1( %v) { ; CHECK-LABEL: vreduce_or_nxv128i1: ; CHECK: # %bb.0: @@ -765,8 +651,6 @@ define zeroext i1 @vreduce_or_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv128i1() - define zeroext i1 @vreduce_xor_nxv128i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv128i1: ; CHECK: # %bb.0: @@ -779,8 +663,6 @@ define zeroext i1 @vreduce_xor_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv128i1() - define zeroext i1 @vreduce_and_nxv128i1( %v) { ; CHECK-LABEL: vreduce_and_nxv128i1: ; CHECK: # %bb.0: @@ -793,8 +675,6 @@ define zeroext i1 @vreduce_and_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv128i1() - define zeroext i1 @vreduce_umax_nxv128i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv128i1: ; CHECK: # %bb.0: @@ -807,8 +687,6 @@ define zeroext i1 @vreduce_umax_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv128i1() - define zeroext i1 @vreduce_smax_nxv128i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv128i1: ; CHECK: # %bb.0: @@ -821,8 +699,6 @@ define zeroext i1 @vreduce_smax_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv128i1() - define zeroext i1 @vreduce_umin_nxv128i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv128i1: ; CHECK: # %bb.0: @@ -835,8 +711,6 @@ define zeroext i1 @vreduce_umin_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv128i1() - define zeroext i1 @vreduce_smin_nxv128i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv128i1: ; CHECK: # %bb.0: @@ -849,8 +723,6 @@ define zeroext i1 @vreduce_smin_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv256i1() - define zeroext i1 @vreduce_or_nxv256i1( %v) { ; CHECK-LABEL: vreduce_or_nxv256i1: ; CHECK: # %bb.0: @@ -865,8 +737,6 @@ define zeroext i1 @vreduce_or_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv256i1() - define zeroext i1 
@vreduce_xor_nxv256i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv256i1: ; CHECK: # %bb.0: @@ -881,8 +751,6 @@ define zeroext i1 @vreduce_xor_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv256i1() - define zeroext i1 @vreduce_and_nxv256i1( %v) { ; CHECK-LABEL: vreduce_and_nxv256i1: ; CHECK: # %bb.0: @@ -897,8 +765,6 @@ define zeroext i1 @vreduce_and_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv256i1() - define zeroext i1 @vreduce_umax_nxv256i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv256i1: ; CHECK: # %bb.0: @@ -913,8 +779,6 @@ define zeroext i1 @vreduce_umax_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv256i1() - define zeroext i1 @vreduce_smax_nxv256i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv256i1: ; CHECK: # %bb.0: @@ -929,8 +793,6 @@ define zeroext i1 @vreduce_smax_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv256i1() - define zeroext i1 @vreduce_umin_nxv256i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv256i1: ; CHECK: # %bb.0: @@ -945,8 +807,6 @@ define zeroext i1 @vreduce_umin_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv256i1() - define zeroext i1 @vreduce_smin_nxv256i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv256i1: ; CHECK: # %bb.0: @@ -961,8 +821,6 @@ define zeroext i1 @vreduce_smin_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv512i1() - define zeroext i1 @vreduce_or_nxv512i1( %v) { ; CHECK-LABEL: vreduce_or_nxv512i1: ; CHECK: # %bb.0: @@ -981,8 +839,6 @@ define zeroext i1 @vreduce_or_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv512i1() - define zeroext i1 @vreduce_xor_nxv512i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv512i1: ; CHECK: # %bb.0: @@ -1001,8 +857,6 @@ define zeroext i1 @vreduce_xor_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv512i1() - define zeroext i1 @vreduce_and_nxv512i1( %v) { ; CHECK-LABEL: vreduce_and_nxv512i1: ; CHECK: # %bb.0: @@ -1021,8 +875,6 @@ define zeroext i1 
@vreduce_and_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv512i1() - define zeroext i1 @vreduce_umax_nxv512i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv512i1: ; CHECK: # %bb.0: @@ -1041,8 +893,6 @@ define zeroext i1 @vreduce_umax_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv512i1() - define zeroext i1 @vreduce_smax_nxv512i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv512i1: ; CHECK: # %bb.0: @@ -1061,8 +911,6 @@ define zeroext i1 @vreduce_smax_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv512i1() - define zeroext i1 @vreduce_umin_nxv512i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv512i1: ; CHECK: # %bb.0: @@ -1081,8 +929,6 @@ define zeroext i1 @vreduce_umin_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv512i1() - define zeroext i1 @vreduce_smin_nxv512i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv512i1: ; CHECK: # %bb.0: @@ -1101,8 +947,6 @@ define zeroext i1 @vreduce_smin_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv1024i1() - define zeroext i1 @vreduce_or_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_or_nxv1024i1: ; CHECK: # %bb.0: @@ -1129,8 +973,6 @@ define zeroext i1 @vreduce_or_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv1024i1() - define zeroext i1 @vreduce_xor_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv1024i1: ; CHECK: # %bb.0: @@ -1157,8 +999,6 @@ define zeroext i1 @vreduce_xor_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv1024i1() - define zeroext i1 @vreduce_and_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_and_nxv1024i1: ; CHECK: # %bb.0: @@ -1185,8 +1025,6 @@ define zeroext i1 @vreduce_and_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv1024i1() - define zeroext i1 @vreduce_umax_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv1024i1: ; CHECK: # %bb.0: @@ -1213,8 +1051,6 @@ define zeroext i1 @vreduce_umax_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv1024i1() - define 
zeroext i1 @vreduce_smax_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv1024i1: ; CHECK: # %bb.0: @@ -1241,8 +1077,6 @@ define zeroext i1 @vreduce_smax_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv1024i1() - define zeroext i1 @vreduce_umin_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv1024i1: ; CHECK: # %bb.0: @@ -1269,8 +1103,6 @@ define zeroext i1 @vreduce_umin_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv1024i1() - define zeroext i1 @vreduce_smin_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv1024i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor.ll index 31436cc1d0def..a4a9be617017c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredxor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredxor.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll index 66ba2697fe5f6..b65663d30672f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.srem.nxv8i7(, , , i32) - define @vrem_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vrem_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.srem.nxv1i8(, , , i32) - define @vrem_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i8: ; CHECK: # %bb.0: 
@@ -69,8 +65,6 @@ define @vrem_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv2i8(, , , i32) - define @vrem_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i8: ; CHECK: # %bb.0: @@ -115,8 +109,6 @@ define @vrem_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv3i8(, , , i32) - define @vrem_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv3i8: ; CHECK: # %bb.0: @@ -127,8 +119,6 @@ define @vrem_vv_nxv3i8( %va, %v } -declare @llvm.vp.srem.nxv4i8(, , , i32) - define @vrem_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i8: ; CHECK: # %bb.0: @@ -173,8 +163,6 @@ define @vrem_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv8i8(, , , i32) - define @vrem_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i8: ; CHECK: # %bb.0: @@ -219,8 +207,6 @@ define @vrem_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv16i8(, , , i32) - define @vrem_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i8: ; CHECK: # %bb.0: @@ -265,8 +251,6 @@ define @vrem_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv32i8(, , , i32) - define @vrem_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i8: ; CHECK: # %bb.0: @@ -311,8 +295,6 @@ define @vrem_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv64i8(, , , i32) - define @vrem_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv64i8: ; CHECK: # %bb.0: @@ -357,8 +339,6 @@ define @vrem_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv1i16(, , , i32) - define @vrem_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i16: ; CHECK: # %bb.0: @@ -403,8 +383,6 @@ define @vrem_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv2i16(, , , i32) - define @vrem_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i16: ; CHECK: # %bb.0: @@ 
-449,8 +427,6 @@ define @vrem_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv4i16(, , , i32) - define @vrem_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i16: ; CHECK: # %bb.0: @@ -495,8 +471,6 @@ define @vrem_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv8i16(, , , i32) - define @vrem_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i16: ; CHECK: # %bb.0: @@ -541,8 +515,6 @@ define @vrem_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv16i16(, , , i32) - define @vrem_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i16: ; CHECK: # %bb.0: @@ -587,8 +559,6 @@ define @vrem_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv32i16(, , , i32) - define @vrem_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i16: ; CHECK: # %bb.0: @@ -633,8 +603,6 @@ define @vrem_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv1i32(, , , i32) - define @vrem_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i32: ; CHECK: # %bb.0: @@ -679,8 +647,6 @@ define @vrem_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv2i32(, , , i32) - define @vrem_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i32: ; CHECK: # %bb.0: @@ -725,8 +691,6 @@ define @vrem_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv4i32(, , , i32) - define @vrem_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i32: ; CHECK: # %bb.0: @@ -771,8 +735,6 @@ define @vrem_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv8i32(, , , i32) - define @vrem_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i32: ; CHECK: # %bb.0: @@ -817,8 +779,6 @@ define @vrem_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv16i32(, , , i32) - define @vrem_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vrem_vv_nxv16i32: ; CHECK: # %bb.0: @@ -863,8 +823,6 @@ define @vrem_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv1i64(, , , i32) - define @vrem_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i64: ; CHECK: # %bb.0: @@ -937,8 +895,6 @@ define @vrem_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv2i64(, , , i32) - define @vrem_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1011,8 +967,6 @@ define @vrem_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv4i64(, , , i32) - define @vrem_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1085,8 +1039,6 @@ define @vrem_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv8i64(, , , i32) - define @vrem_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem.ll b/llvm/test/CodeGen/RISCV/rvv/vrem.ll index d18b939823a23..a9b1cef3984e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vrem.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i32.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i8.i8( 
- , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll index 4608661eb5df3..51448f2050b6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.urem.nxv8i7(, , , i32) - define @vremu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vremu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.urem.nxv1i8(, , , i32) - define @vremu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -68,8 +64,6 @@ define @vremu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv2i8(, , , i32) - define @vremu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +108,6 @@ define @vremu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv3i8(, , , i32) - define @vremu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vremu_vv_nxv3i8( %va, %v } -declare @llvm.vp.urem.nxv4i8(, , , i32) - define @vremu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vremu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv8i8(, , , i32) - define @vremu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i8: ; CHECK: # %bb.0: @@ 
-218,8 +206,6 @@ define @vremu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv16i8(, , , i32) - define @vremu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -264,8 +250,6 @@ define @vremu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv32i8(, , , i32) - define @vremu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -310,8 +294,6 @@ define @vremu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv64i8(, , , i32) - define @vremu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -356,8 +338,6 @@ define @vremu_vx_nxv64i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv1i16(, , , i32) - define @vremu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -402,8 +382,6 @@ define @vremu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv2i16(, , , i32) - define @vremu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ define @vremu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv4i16(, , , i32) - define @vremu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -494,8 +470,6 @@ define @vremu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv8i16(, , , i32) - define @vremu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -540,8 +514,6 @@ define @vremu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv16i16(, , , i32) - define @vremu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -586,8 +558,6 @@ define @vremu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv32i16(, , , i32) - define @vremu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vremu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -632,8 +602,6 @@ define @vremu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv1i32(, , , i32) - define @vremu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -678,8 +646,6 @@ define @vremu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv2i32(, , , i32) - define @vremu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -724,8 +690,6 @@ define @vremu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv4i32(, , , i32) - define @vremu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -770,8 +734,6 @@ define @vremu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv8i32(, , , i32) - define @vremu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -816,8 +778,6 @@ define @vremu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv16i32(, , , i32) - define @vremu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -862,8 +822,6 @@ define @vremu_vx_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv1i64(, , , i32) - define @vremu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -936,8 +894,6 @@ define @vremu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv2i64(, , , i32) - define @vremu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1010,8 +966,6 @@ define @vremu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv4i64(, , , i32) - define @vremu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1084,8 +1038,6 @@ define @vremu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv8i64(, , , i32) - define 
@vremu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu.ll b/llvm/test/CodeGen/RISCV/rvv/vremu.ll index 138232c103da0..c1a49f45e5d5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vremu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( - , - , - , - 
, - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 
@@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ 
-780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 
%2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll index b6588bceceb37..b94b3490dc835 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vrev8.nxv1i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv32i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv64i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i16: ; CHECK: # %bb.0: 
# %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrev8.nxv32i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i32( - , - , - iXLen); - define 
@intrinsic_vrev8_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vrev8_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll index 91b95a96050d2..9813e4a7533b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vrgather.vv.nxv1i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # 
%bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv64i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +243,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,14 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i16.iXLen( - , - , - , - iXLen) - 
define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,14 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,14 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,14 +362,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,14 +396,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,14 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,14 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,14 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,14 +533,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,14 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,14 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +619,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.nxv1i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,14 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,14 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,14 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,14 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1087,14 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1112,12 +790,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1135,14 +807,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1160,12 +824,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1183,14 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen( - , - , - , - , - iXLen, - iXLen) 
- define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1208,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1231,14 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1256,12 +892,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1279,14 +909,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1304,12 +926,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1327,14 +943,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; 
CHECK: # %bb.0: # %entry @@ -1353,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1376,14 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1401,12 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1424,14 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1449,12 +1029,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1472,14 +1046,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1497,12 +1063,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1520,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1545,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1568,14 +1114,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1132,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1617,14 +1149,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1642,12 +1166,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1665,14 +1183,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1690,12 +1200,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1713,14 +1217,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1738,12 +1234,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1761,14 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1787,12 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1810,14 +1286,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1835,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1858,14 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1883,12 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1906,14 +1354,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1931,12 +1371,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1954,14 +1388,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1979,12 +1405,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2002,14 +1422,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2027,12 +1439,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -2050,14 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -2075,12 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv64i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2098,14 +1490,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2123,12 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2146,14 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen( - , - , - iXLen, - , - iXLen, - 
iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2171,12 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2194,14 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2219,12 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2242,14 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2267,12 +1609,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2290,14 +1626,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2315,12 +1643,6 @@ entry: ret 
%a } -declare @llvm.riscv.vrgather.vx.nxv16i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2338,14 +1660,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2363,12 +1677,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2386,14 +1694,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2411,12 +1711,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2434,14 +1728,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2459,12 +1745,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2482,14 +1762,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2507,12 +1779,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2530,14 +1796,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2555,12 +1813,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2578,14 +1830,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2603,12 +1847,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2626,14 +1864,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define 
@intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2651,12 +1881,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2674,14 +1898,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2699,12 +1915,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2722,14 +1932,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2747,12 +1949,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2770,14 +1966,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2795,12 +1983,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrgather.vx.nxv8i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2818,14 +2000,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2843,12 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f16_nxv1f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2866,14 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2891,12 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f16_nxv2f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -2914,14 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -2939,12 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f16_nxv4f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16: ; CHECK: # 
%bb.0: # %entry @@ -2962,14 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -2987,12 +2119,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f16_nxv8f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -3010,14 +2136,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -3035,12 +2153,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16f16_nxv16f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -3058,14 +2170,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -3083,12 +2187,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32f16_nxv32f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -3106,14 +2204,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16( %0, 
%1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -3131,12 +2221,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f32_nxv1f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -3154,14 +2238,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -3179,12 +2255,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f32_nxv2f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -3202,14 +2272,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -3227,12 +2289,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f32_nxv4f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -3250,14 +2306,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -3275,12 +2323,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f32.iXLen( - , - , - 
iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f32_nxv8f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -3298,14 +2340,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -3323,12 +2357,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16f32_nxv16f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -3346,14 +2374,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -3371,12 +2391,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f64_nxv1f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -3394,14 +2408,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -3419,12 +2425,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f64_nxv2f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -3442,14 +2442,6 @@ entry: 
ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -3467,12 +2459,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f64_nxv4f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -3490,14 +2476,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -3515,12 +2493,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f64_nxv8f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -3538,14 +2510,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -4821,12 +3785,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4844,14 +3802,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4869,12 +3819,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4892,14 +3836,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4917,12 +3853,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4940,14 +3870,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4965,12 +3887,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4988,14 +3904,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5013,12 +3921,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.nxv16bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -5036,14 +3938,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -5061,12 +3955,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -5084,14 +3972,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -5110,12 +3990,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -5133,14 +4007,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -5158,12 +4024,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -5181,14 +4041,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -5206,12 +4058,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -5229,14 +4075,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -5254,12 +4092,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -5277,14 +4109,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -5302,12 +4126,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -5325,14 +4143,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen( 
- , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -5350,12 +4160,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -5373,14 +4177,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll index 7b460f2c058f8..737140783480b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll @@ -62,11 +62,3 @@ loopIR3.i.i: ; preds = %loopIR3.i.i, %loopI br label %loopIR3.i.i } -; Function Attrs: nocallback nofree nosync nounwind readnone willreturn -declare @llvm.vector.insert.nxv8i16.nxv1i16(, , i64 immarg) #0 - -; Function Attrs: nounwind readnone -declare @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(, , , i64) #1 - -attributes #0 = { nocallback nofree nosync nounwind readnone willreturn } -attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll index b3f36e4420a6c..1b08999f8e4bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare 
@llvm.riscv.vrgatherei16.vv.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgatherei16.vv.mask.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16: ; CHECK: # 
%bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -604,14 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -629,12 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,14 +464,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -677,12 +481,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -700,14 +498,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -725,12 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -748,14 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -797,14 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -822,12 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -845,14 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -871,12 +619,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -894,14 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -919,12 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -942,14 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -967,12 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -990,14 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1015,12 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1038,14 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1063,12 +755,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1086,14 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1111,12 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1134,14 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1160,12 +824,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgatherei16.vv.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1183,14 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1208,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1231,14 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1256,12 +892,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1279,14 +909,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1304,12 +926,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1327,14 +943,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1353,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1376,14 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1401,12 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -1424,14 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll index eb129da2697b6..ade68af6cd3a5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB -declare @llvm.fshl.nxv1i8(, , ) - define @vrol_vv_nxv1i8( 
%a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i8: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define @vrol_vx_nxv1i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv2i8(, , ) - define @vrol_vv_nxv2i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i8: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define @vrol_vx_nxv2i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv4i8(, , ) - define @vrol_vv_nxv4i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i8: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define @vrol_vx_nxv4i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv8i8(, , ) - define @vrol_vv_nxv8i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i8: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define @vrol_vx_nxv8i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv16i8(, , ) - define @vrol_vv_nxv16i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i8: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define @vrol_vx_nxv16i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv32i8(, , ) - define @vrol_vv_nxv32i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv32i8: ; CHECK: # %bb.0: @@ -286,8 +274,6 @@ define @vrol_vx_nxv32i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv64i8(, , ) - define @vrol_vv_nxv64i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv64i8: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define @vrol_vx_nxv64i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv1i16(, , ) - define @vrol_vv_nxv1i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i16: ; CHECK: # %bb.0: @@ -380,8 +364,6 @@ define @vrol_vx_nxv1i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv2i16(, , ) - define @vrol_vv_nxv2i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i16: ; CHECK: # %bb.0: @@ -427,8 +409,6 @@ define @vrol_vx_nxv2i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv4i16(, , ) - define @vrol_vv_nxv4i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i16: ; CHECK: # %bb.0: @@ -474,8 +454,6 @@ define @vrol_vx_nxv4i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv8i16(, , ) - define @vrol_vv_nxv8i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i16: ; CHECK: # %bb.0: @@ -521,8 +499,6 @@ define @vrol_vx_nxv8i16( %a, i16 %b) { ret %x } 
-declare @llvm.fshl.nxv16i16(, , ) - define @vrol_vv_nxv16i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i16: ; CHECK: # %bb.0: @@ -568,8 +544,6 @@ define @vrol_vx_nxv16i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv32i16(, , ) - define @vrol_vv_nxv32i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv32i16: ; CHECK: # %bb.0: @@ -615,8 +589,6 @@ define @vrol_vx_nxv32i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv1i32(, , ) - define @vrol_vv_nxv1i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i32: ; CHECK: # %bb.0: @@ -675,8 +647,6 @@ define @vrol_vx_nxv1i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv2i32(, , ) - define @vrol_vv_nxv2i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i32: ; CHECK: # %bb.0: @@ -735,8 +705,6 @@ define @vrol_vx_nxv2i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv4i32(, , ) - define @vrol_vv_nxv4i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i32: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define @vrol_vx_nxv4i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv8i32(, , ) - define @vrol_vv_nxv8i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i32: ; CHECK: # %bb.0: @@ -855,8 +821,6 @@ define @vrol_vx_nxv8i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv16i32(, , ) - define @vrol_vv_nxv16i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i32: ; CHECK: # %bb.0: @@ -915,8 +879,6 @@ define @vrol_vx_nxv16i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv1i64(, , ) - define @vrol_vv_nxv1i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i64: ; CHECK: # %bb.0: @@ -978,8 +940,6 @@ define @vrol_vx_nxv1i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv2i64(, , ) - define @vrol_vv_nxv2i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1041,8 +1001,6 @@ define @vrol_vx_nxv2i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv4i64(, , ) - define @vrol_vv_nxv4i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1104,8 +1062,6 @@ define @vrol_vx_nxv4i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv8i64(, , ) - define @vrol_vv_nxv8i64( %a, %b) { ; CHECK-LABEL: 
vrol_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll index 5d3ac576eb4ed..959bb96781621 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrol.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vrol.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrol.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i32.nxv2i32( - , - , 
- , - iXLen) - define @intrinsic_vrol_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i8: ; CHECK: 
# %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv64i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i16( - , - , - 
iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i32: ; CHECK: # %bb.0: # %entry 
@@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll index 97524ac61b96e..ddf6c530e3f2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s 
--check-prefix=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB -declare @llvm.fshr.nxv1i8(, , ) -declare @llvm.fshl.nxv1i8(, , ) - define @vror_vv_nxv1i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i8: ; CHECK: # %bb.0: @@ -88,9 +85,6 @@ define @vror_vi_rotl_nxv1i8( %a) { ret %x } -declare @llvm.fshr.nxv2i8(, , ) -declare @llvm.fshl.nxv2i8(, , ) - define @vror_vv_nxv2i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i8: ; CHECK: # %bb.0: @@ -172,9 +166,6 @@ define @vror_vi_rotl_nxv2i8( %a) { ret %x } -declare @llvm.fshr.nxv4i8(, , ) -declare @llvm.fshl.nxv4i8(, , ) - define @vror_vv_nxv4i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i8: ; CHECK: # %bb.0: @@ -256,9 +247,6 @@ define @vror_vi_rotl_nxv4i8( %a) { ret %x } -declare @llvm.fshr.nxv8i8(, , ) -declare @llvm.fshl.nxv8i8(, , ) - define @vror_vv_nxv8i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i8: ; CHECK: # %bb.0: @@ -340,9 +328,6 @@ define @vror_vi_rotl_nxv8i8( %a) { ret %x } -declare @llvm.fshr.nxv16i8(, , ) -declare @llvm.fshl.nxv16i8(, , ) - define @vror_vv_nxv16i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv16i8: ; CHECK: # %bb.0: @@ -424,9 +409,6 @@ define @vror_vi_rotl_nxv16i8( %a) { ret %x } -declare @llvm.fshr.nxv32i8(, , ) -declare @llvm.fshl.nxv32i8(, , ) - define @vror_vv_nxv32i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv32i8: ; CHECK: # %bb.0: @@ -508,9 +490,6 @@ define @vror_vi_rotl_nxv32i8( %a) { ret %x } -declare @llvm.fshr.nxv64i8(, , ) -declare @llvm.fshl.nxv64i8(, , ) - define @vror_vv_nxv64i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv64i8: ; CHECK: # %bb.0: @@ -592,9 +571,6 @@ define @vror_vi_rotl_nxv64i8( %a) { ret %x } -declare @llvm.fshr.nxv1i16(, , ) -declare @llvm.fshl.nxv1i16(, , ) - define @vror_vv_nxv1i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i16: ; CHECK: # %bb.0: @@ -676,9 +652,6 @@ define @vror_vi_rotl_nxv1i16( %a) { ret %x } -declare @llvm.fshr.nxv2i16(, , ) -declare @llvm.fshl.nxv2i16(, , ) - define @vror_vv_nxv2i16( %a, %b) { ; CHECK-LABEL: 
vror_vv_nxv2i16: ; CHECK: # %bb.0: @@ -760,9 +733,6 @@ define @vror_vi_rotl_nxv2i16( %a) { ret %x } -declare @llvm.fshr.nxv4i16(, , ) -declare @llvm.fshl.nxv4i16(, , ) - define @vror_vv_nxv4i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i16: ; CHECK: # %bb.0: @@ -844,9 +814,6 @@ define @vror_vi_rotl_nxv4i16( %a) { ret %x } -declare @llvm.fshr.nxv8i16(, , ) -declare @llvm.fshl.nxv8i16(, , ) - define @vror_vv_nxv8i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i16: ; CHECK: # %bb.0: @@ -928,9 +895,6 @@ define @vror_vi_rotl_nxv8i16( %a) { ret %x } -declare @llvm.fshr.nxv16i16(, , ) -declare @llvm.fshl.nxv16i16(, , ) - define @vror_vv_nxv16i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1012,9 +976,6 @@ define @vror_vi_rotl_nxv16i16( %a) { ret %x } -declare @llvm.fshr.nxv32i16(, , ) -declare @llvm.fshl.nxv32i16(, , ) - define @vror_vv_nxv32i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1096,9 +1057,6 @@ define @vror_vi_rotl_nxv32i16( %a) { ret %x } -declare @llvm.fshr.nxv1i32(, , ) -declare @llvm.fshl.nxv1i32(, , ) - define @vror_vv_nxv1i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1193,9 +1151,6 @@ define @vror_vi_rotl_nxv1i32( %a) { ret %x } -declare @llvm.fshr.nxv2i32(, , ) -declare @llvm.fshl.nxv2i32(, , ) - define @vror_vv_nxv2i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1290,9 +1245,6 @@ define @vror_vi_rotl_nxv2i32( %a) { ret %x } -declare @llvm.fshr.nxv4i32(, , ) -declare @llvm.fshl.nxv4i32(, , ) - define @vror_vv_nxv4i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1387,9 +1339,6 @@ define @vror_vi_rotl_nxv4i32( %a) { ret %x } -declare @llvm.fshr.nxv8i32(, , ) -declare @llvm.fshl.nxv8i32(, , ) - define @vror_vv_nxv8i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1484,9 +1433,6 @@ define @vror_vi_rotl_nxv8i32( %a) { ret %x } -declare @llvm.fshr.nxv16i32(, , ) -declare @llvm.fshl.nxv16i32(, , ) - define @vror_vv_nxv16i32( %a, %b) { ; 
CHECK-LABEL: vror_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1581,9 +1527,6 @@ define @vror_vi_rotl_nxv16i32( %a) { ret %x } -declare @llvm.fshr.nxv1i64(, , ) -declare @llvm.fshl.nxv1i64(, , ) - define @vror_vv_nxv1i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1683,9 +1626,6 @@ define @vror_vi_rotl_nxv1i64( %a) { ret %x } -declare @llvm.fshr.nxv2i64(, , ) -declare @llvm.fshl.nxv2i64(, , ) - define @vror_vv_nxv2i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1785,9 +1725,6 @@ define @vror_vi_rotl_nxv2i64( %a) { ret %x } -declare @llvm.fshr.nxv4i64(, , ) -declare @llvm.fshl.nxv4i64(, , ) - define @vror_vv_nxv4i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1887,9 +1824,6 @@ define @vror_vi_rotl_nxv4i64( %a) { ret %x } -declare @llvm.fshr.nxv8i64(, , ) -declare @llvm.fshl.nxv8i64(, , ) - define @vror_vv_nxv8i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vror.ll b/llvm/test/CodeGen/RISCV/rvv/vror.ll index 4e5734310daef..1f3114ab152e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vror.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vror.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vror.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vror_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vror.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i16_nxv2i16( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # 
%entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare 
@llvm.riscv.vror.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define 
@intrinsic_vror_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 
+783,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vror_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv64i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i16( - , - , - iXLen, - iXLen) - define 
@intrinsic_vror_vx_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ 
entry: ret %a } -declare @llvm.riscv.vror.nxv1i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vror_mask_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen) - define 
@intrinsic_vror_mask_vx_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll index c41139c64eb08..451a9b47a89f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sub.nxv1i8(, , , i32) - define @vrsub_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i8: ; CHECK: # %bb.0: @@ -50,8 +48,6 @@ define @vrsub_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv2i8(, , , i32) - define @vrsub_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vrsub_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv4i8(, , , i32) - define @vrsub_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i8: ; CHECK: # %bb.0: @@ -142,8 +136,6 @@ define @vrsub_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv8i8(, , , i32) - define @vrsub_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i8: ; CHECK: # %bb.0: @@ -188,8 +180,6 @@ define @vrsub_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv16i8(, , , i32) - define 
@vrsub_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i8: ; CHECK: # %bb.0: @@ -234,8 +224,6 @@ define @vrsub_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv32i8(, , , i32) - define @vrsub_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i8: ; CHECK: # %bb.0: @@ -280,8 +268,6 @@ define @vrsub_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv64i8(, , , i32) - define @vrsub_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv64i8: ; CHECK: # %bb.0: @@ -326,8 +312,6 @@ define @vrsub_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv1i16(, , , i32) - define @vrsub_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i16: ; CHECK: # %bb.0: @@ -372,8 +356,6 @@ define @vrsub_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i16(, , , i32) - define @vrsub_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i16: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define @vrsub_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i16(, , , i32) - define @vrsub_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i16: ; CHECK: # %bb.0: @@ -464,8 +444,6 @@ define @vrsub_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i16(, , , i32) - define @vrsub_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i16: ; CHECK: # %bb.0: @@ -510,8 +488,6 @@ define @vrsub_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i16(, , , i32) - define @vrsub_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i16: ; CHECK: # %bb.0: @@ -556,8 +532,6 @@ define @vrsub_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv32i16(, , , i32) - define @vrsub_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i16: ; CHECK: # %bb.0: @@ -602,8 +576,6 @@ define 
@vrsub_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv1i32(, , , i32) - define @vrsub_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i32: ; CHECK: # %bb.0: @@ -648,8 +620,6 @@ define @vrsub_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i32(, , , i32) - define @vrsub_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i32: ; CHECK: # %bb.0: @@ -694,8 +664,6 @@ define @vrsub_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i32(, , , i32) - define @vrsub_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i32: ; CHECK: # %bb.0: @@ -740,8 +708,6 @@ define @vrsub_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i32(, , , i32) - define @vrsub_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i32: ; CHECK: # %bb.0: @@ -786,8 +752,6 @@ define @vrsub_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i32(, , , i32) - define @vrsub_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i32: ; CHECK: # %bb.0: @@ -832,8 +796,6 @@ define @vrsub_vi_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv1i64(, , , i32) - define @vrsub_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv1i64: ; RV32: # %bb.0: @@ -906,8 +868,6 @@ define @vrsub_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i64(, , , i32) - define @vrsub_vx_nxv2i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv2i64: ; RV32: # %bb.0: @@ -980,8 +940,6 @@ define @vrsub_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i64(, , , i32) - define @vrsub_vx_nxv4i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv4i64: ; RV32: # %bb.0: @@ -1054,8 +1012,6 @@ define @vrsub_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i64(, , , i32) - define @vrsub_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: 
vrsub_vx_nxv8i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll index bab0e8fa0bff3..d3f2c9a24b420 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vrsub.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrsub.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -866,13 +626,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -902,12 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -936,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -972,12 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1006,13 +740,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1042,12 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1076,13 +797,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll index 15af5c418b413..88b358c66b504 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.sadd.sat.nxv1i8(, ) - define @sadd_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define @sadd_nxv1i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i8(, ) - define @sadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @sadd_nxv2i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i8(, ) - define @sadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define @sadd_nxv4i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i8(, ) - define @sadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @sadd_nxv8i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i8(, ) - define @sadd_nxv16i8_vv( %va, %b) { ; 
CHECK-LABEL: sadd_nxv16i8_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define @sadd_nxv16i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv32i8(, ) - define @sadd_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv32i8_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define @sadd_nxv32i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv64i8(, ) - define @sadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv64i8_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define @sadd_nxv64i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i16(, ) - define @sadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define @sadd_nxv1i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i16(, ) - define @sadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i16_vv: ; CHECK: # %bb.0: @@ -310,8 +292,6 @@ define @sadd_nxv2i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i16(, ) - define @sadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i16_vv: ; CHECK: # %bb.0: @@ -344,8 +324,6 @@ define @sadd_nxv4i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i16(, ) - define @sadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i16_vv: ; CHECK: # %bb.0: @@ -378,8 +356,6 @@ define @sadd_nxv8i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i16(, ) - define @sadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i16_vv: ; CHECK: # %bb.0: @@ -412,8 +388,6 @@ define @sadd_nxv16i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv32i16(, ) - define @sadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv32i16_vv: ; CHECK: # %bb.0: @@ -446,8 +420,6 @@ define @sadd_nxv32i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i32(, ) - define @sadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i32_vv: ; CHECK: # %bb.0: @@ -480,8 +452,6 @@ define @sadd_nxv1i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i32(, ) - define @sadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i32_vv: ; CHECK: # %bb.0: @@ -514,8 +484,6 @@ define @sadd_nxv2i32_vi( %va) { ret %v } -declare 
@llvm.sadd.sat.nxv4i32(, ) - define @sadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i32_vv: ; CHECK: # %bb.0: @@ -548,8 +516,6 @@ define @sadd_nxv4i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i32(, ) - define @sadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i32_vv: ; CHECK: # %bb.0: @@ -582,8 +548,6 @@ define @sadd_nxv8i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i32(, ) - define @sadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i32_vv: ; CHECK: # %bb.0: @@ -616,8 +580,6 @@ define @sadd_nxv16i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i64(, ) - define @sadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i64_vv: ; CHECK: # %bb.0: @@ -664,8 +626,6 @@ define @sadd_nxv1i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i64(, ) - define @sadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i64_vv: ; CHECK: # %bb.0: @@ -712,8 +672,6 @@ define @sadd_nxv2i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i64(, ) - define @sadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i64_vv: ; CHECK: # %bb.0: @@ -760,8 +718,6 @@ define @sadd_nxv4i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i64(, ) - define @sadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll index e471f4b2e92b5..98634fe55de41 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sadd.sat.nxv8i7(, , , i32) - define @vsadd_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vx_nxv8i7: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define @vsadd_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.sadd.sat.nxv1i8(, , , i32) - define @vsadd_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -102,8 
+98,6 @@ define @vsadd_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv2i8(, , , i32) - define @vsadd_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -168,8 +162,6 @@ define @vsadd_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv3i8(, , , i32) - define @vsadd_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv3i8: ; CHECK: # %bb.0: @@ -234,8 +226,6 @@ define @vsadd_vi_nxv3i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv4i8(, , , i32) - define @vsadd_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -300,8 +290,6 @@ define @vsadd_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv8i8(, , , i32) - define @vsadd_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -366,8 +354,6 @@ define @vsadd_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv16i8(, , , i32) - define @vsadd_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -432,8 +418,6 @@ define @vsadd_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv32i8(, , , i32) - define @vsadd_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -498,8 +482,6 @@ define @vsadd_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv64i8(, , , i32) - define @vsadd_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -566,8 +548,6 @@ define @vsadd_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.sadd.sat.nxv128i8(, , , i32) - define @vsadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv128i8: ; CHECK: # %bb.0: @@ -616,8 +596,6 @@ define @vsadd_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i16(, , , i32) - define @vsadd_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -682,8 +660,6 @@ define @vsadd_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i16(, , , i32) - define @vsadd_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -748,8 +724,6 @@ define @vsadd_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i16(, , , i32) - define @vsadd_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -814,8 +788,6 @@ define @vsadd_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i16(, , , i32) - define @vsadd_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -880,8 +852,6 @@ define @vsadd_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv16i16(, , , i32) - define @vsadd_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -946,8 +916,6 @@ define @vsadd_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv32i16(, , , i32) - define @vsadd_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1012,8 +980,6 @@ define @vsadd_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i32(, , , i32) - define @vsadd_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1078,8 +1044,6 @@ define @vsadd_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i32(, , , i32) - define @vsadd_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i32: ; CHECK: # 
%bb.0: @@ -1144,8 +1108,6 @@ define @vsadd_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i32(, , , i32) - define @vsadd_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1210,8 +1172,6 @@ define @vsadd_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i32(, , , i32) - define @vsadd_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1276,8 +1236,6 @@ define @vsadd_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv16i32(, , , i32) - define @vsadd_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1344,8 +1302,6 @@ define @vsadd_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.sadd.sat.nxv32i32(, , , i32) - define @vsadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1395,8 +1351,6 @@ define @vsadd_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i64(, , , i32) - define @vsadd_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1489,8 +1443,6 @@ define @vsadd_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i64(, , , i32) - define @vsadd_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1583,8 +1535,6 @@ define @vsadd_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i64(, , , i32) - define @vsadd_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1677,8 +1627,6 @@ define @vsadd_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i64(, , , i32) - define @vsadd_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll index 7729e7f00f5c4..4d16bb467fa46 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( - , - , - , - iXLen) - define 
@intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, 
%1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( 
%0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll index c146f61fbf976..1328f5964e903 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.uadd.sat.nxv1i8(, ) - define @uadd_nxv1i8_vv( %va, 
%b) { ; CHECK-LABEL: uadd_nxv1i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define @uadd_nxv1i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i8(, ) - define @uadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @uadd_nxv2i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i8(, ) - define @uadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define @uadd_nxv4i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i8(, ) - define @uadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @uadd_nxv8i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i8(, ) - define @uadd_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i8_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define @uadd_nxv16i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv32i8(, ) - define @uadd_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i8_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define @uadd_nxv32i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv64i8(, ) - define @uadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv64i8_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define @uadd_nxv64i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i16(, ) - define @uadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define @uadd_nxv1i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i16(, ) - define @uadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i16_vv: ; CHECK: # %bb.0: @@ -310,8 +292,6 @@ define @uadd_nxv2i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i16(, ) - define @uadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i16_vv: ; CHECK: # %bb.0: @@ -344,8 +324,6 @@ define @uadd_nxv4i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i16(, ) - define @uadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i16_vv: ; CHECK: # %bb.0: @@ -378,8 +356,6 @@ define @uadd_nxv8i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i16(, ) - define 
@uadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i16_vv: ; CHECK: # %bb.0: @@ -412,8 +388,6 @@ define @uadd_nxv16i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv32i16(, ) - define @uadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i16_vv: ; CHECK: # %bb.0: @@ -446,8 +420,6 @@ define @uadd_nxv32i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i32(, ) - define @uadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i32_vv: ; CHECK: # %bb.0: @@ -480,8 +452,6 @@ define @uadd_nxv1i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i32(, ) - define @uadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i32_vv: ; CHECK: # %bb.0: @@ -514,8 +484,6 @@ define @uadd_nxv2i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i32(, ) - define @uadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i32_vv: ; CHECK: # %bb.0: @@ -548,8 +516,6 @@ define @uadd_nxv4i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i32(, ) - define @uadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i32_vv: ; CHECK: # %bb.0: @@ -582,8 +548,6 @@ define @uadd_nxv8i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i32(, ) - define @uadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i32_vv: ; CHECK: # %bb.0: @@ -616,8 +580,6 @@ define @uadd_nxv16i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i64(, ) - define @uadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i64_vv: ; CHECK: # %bb.0: @@ -664,8 +626,6 @@ define @uadd_nxv1i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i64(, ) - define @uadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i64_vv: ; CHECK: # %bb.0: @@ -712,8 +672,6 @@ define @uadd_nxv2i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i64(, ) - define @uadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i64_vv: ; CHECK: # %bb.0: @@ -760,8 +718,6 @@ define @uadd_nxv4i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i64(, ) - define @uadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll index f76a2b4b78bca..a7d304261f87f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.uadd.sat.nxv8i7(, , , i32) - define @vsaddu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vsaddu_vx_nxv8i7( %a, i7 signext %b, ret %v } -declare @llvm.vp.uadd.sat.nxv1i8(, , , i32) - define @vsaddu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -101,8 +97,6 @@ define @vsaddu_vi_nxv1i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv2i8(, , , i32) - define @vsaddu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -167,8 +161,6 @@ define @vsaddu_vi_nxv2i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv3i8(, , , i32) - define @vsaddu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -233,8 +225,6 @@ define @vsaddu_vi_nxv3i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv4i8(, , , i32) - define @vsaddu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -299,8 +289,6 @@ define @vsaddu_vi_nxv4i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv8i8(, , , i32) - define @vsaddu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -365,8 +353,6 @@ define @vsaddu_vi_nxv8i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv16i8(, , , i32) - define @vsaddu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -431,8 +417,6 @@ define @vsaddu_vi_nxv16i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv32i8(, , , i32) - 
define @vsaddu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -497,8 +481,6 @@ define @vsaddu_vi_nxv32i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv64i8(, , , i32) - define @vsaddu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -565,8 +547,6 @@ define @vsaddu_vi_nxv64i8_unmasked( %va, i3 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.uadd.sat.nxv128i8(, , , i32) - define @vsaddu_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv128i8: ; CHECK: # %bb.0: @@ -615,8 +595,6 @@ define @vsaddu_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i16(, , , i32) - define @vsaddu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -681,8 +659,6 @@ define @vsaddu_vi_nxv1i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i16(, , , i32) - define @vsaddu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -747,8 +723,6 @@ define @vsaddu_vi_nxv2i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i16(, , , i32) - define @vsaddu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -813,8 +787,6 @@ define @vsaddu_vi_nxv4i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i16(, , , i32) - define @vsaddu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -879,8 +851,6 @@ define @vsaddu_vi_nxv8i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv16i16(, , , i32) - define @vsaddu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -945,8 +915,6 @@ define @vsaddu_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv32i16(, , , i32) - define @vsaddu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vsaddu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1011,8 +979,6 @@ define @vsaddu_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i32(, , , i32) - define @vsaddu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1077,8 +1043,6 @@ define @vsaddu_vi_nxv1i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i32(, , , i32) - define @vsaddu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1143,8 +1107,6 @@ define @vsaddu_vi_nxv2i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i32(, , , i32) - define @vsaddu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1209,8 +1171,6 @@ define @vsaddu_vi_nxv4i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i32(, , , i32) - define @vsaddu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1275,8 +1235,6 @@ define @vsaddu_vi_nxv8i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv16i32(, , , i32) - define @vsaddu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1343,8 +1301,6 @@ define @vsaddu_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.uadd.sat.nxv32i32(, , , i32) - define @vsaddu_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1394,8 +1350,6 @@ define @vsaddu_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i64(, , , i32) - define @vsaddu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1488,8 +1442,6 @@ define @vsaddu_vi_nxv1i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i64(, , , i32) - define @vsaddu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1582,8 +1534,6 @@ define @vsaddu_vi_nxv2i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i64(, , , i32) - define @vsaddu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1676,8 +1626,6 @@ define @vsaddu_vi_nxv4i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i64(, , , i32) - define @vsaddu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll index 0526a5f4b5500..032c9057aa0c8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, 
- iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret 
%a } -declare @llvm.riscv.vsaddu.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ 
-1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsaddu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 
+1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll index 014f6a02d83a9..bc12a3c0e7488 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsbc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i16.i16( - , - , - i16, - , - iXLen); - 
define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1000,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,13 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll index 4442f97b8fe76..aef3aa563069f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zve64x,+m -verify-machineinstrs < %s | FileCheck %s -declare i64 @llvm.vscale.i64() - define i64 @vscale_lshr(i64 %TC) { ; 
CHECK-LABEL: vscale_lshr: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll index 607ce2394ee81..3b0d4f8891963 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -62,11 +51,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -82,12 +66,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -104,11 +82,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -124,12 +97,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -146,11 +113,6 @@ entry: ret 
void } -declare void @llvm.riscv.vse.nxv8i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -166,12 +128,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -208,12 +159,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -230,11 +175,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -250,12 +190,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -272,11 +206,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -292,12 +221,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -314,11 +237,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -334,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -376,12 +283,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -398,11 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +314,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,11 +330,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -460,12 +345,6 @@ entry: ret void } 
-declare void @llvm.riscv.vse.mask.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -482,11 +361,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -502,12 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -524,11 +392,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -544,12 +407,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,11 +423,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -586,12 +438,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -608,11 +454,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, 
ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -628,12 +469,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -650,11 +485,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -670,12 +500,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,12 +531,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -734,11 +547,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +562,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ 
-776,11 +578,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -796,12 +593,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -818,11 +609,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -838,12 +624,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -860,11 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -880,12 +655,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -902,11 +671,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,12 +686,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +702,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -964,12 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -986,11 +733,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1006,12 +748,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +764,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1048,12 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1070,11 +795,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1090,12 +810,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1112,11 +826,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +841,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1154,11 +857,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1174,12 +872,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1196,11 +888,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1216,12 +903,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1238,11 +919,6 @@ entry: ret void } 
-declare void @llvm.riscv.vse.nxv32f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1258,12 +934,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1280,11 +950,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1300,12 +965,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1322,11 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1342,12 +996,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1364,11 +1012,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1384,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4bf16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1406,11 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1426,12 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1448,11 +1074,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1468,12 +1089,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1490,11 +1105,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1510,12 +1120,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1532,11 +1136,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1552,12 +1151,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1574,11 +1167,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1182,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1616,11 +1198,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1636,12 +1213,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1658,11 +1229,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1678,12 +1244,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1700,11 +1260,6 @@ entry: ret void } -declare void 
@llvm.riscv.vse.nxv16i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1720,12 +1275,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1742,11 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1306,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1784,11 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv64i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1804,12 +1337,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll index 76fd1e1d8293f..587c577c6e5c5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.select.nxv1bf16(, , , i32) - 
define @select_nxv1bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1bf16: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define @select_nxv1bf16( %a, %v } -declare @llvm.vp.select.nxv2bf16(, , , i32) - define @select_nxv2bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2bf16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @select_nxv2bf16( %a, %v } -declare @llvm.vp.select.nxv4bf16(, , , i32) - define @select_nxv4bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4bf16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @select_nxv4bf16( %a, %v } -declare @llvm.vp.select.nxv8bf16(, , , i32) - define @select_nxv8bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8bf16: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define @select_nxv8bf16( %a, %v } -declare @llvm.vp.select.nxv16bf16(, , , i32) - define @select_nxv16bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16bf16: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define @select_nxv16bf16( %a, %v } -declare @llvm.vp.select.nxv32bf16(, , , i32) - define @select_nxv32bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll index 5cecd3cae4d2a..d1933560f2698 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.select.nxv1i1(, , , i32) - define @select_nxv1i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i1: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @select_nxv1i1( %a, ret %v } -declare @llvm.vp.select.nxv2i1(, , , i32) - define @select_nxv2i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i1: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define @select_nxv2i1( %a, ret %v } -declare @llvm.vp.select.nxv4i1(, , , i32) - 
define @select_nxv4i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define @select_nxv4i1( %a, ret %v } -declare @llvm.vp.select.nxv8i1(, , , i32) - define @select_nxv8i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i1: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define @select_nxv8i1( %a, ret %v } -declare @llvm.vp.select.nxv16i1(, , , i32) - define @select_nxv16i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i1: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define @select_nxv16i1( %a, %v } -declare @llvm.vp.select.nxv32i1(, , , i32) - define @select_nxv32i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i1: ; CHECK: # %bb.0: @@ -92,8 +80,6 @@ define @select_nxv32i1( %a, %v } -declare @llvm.vp.select.nxv64i1(, , , i32) - define @select_nxv64i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i1: ; CHECK: # %bb.0: @@ -106,8 +92,6 @@ define @select_nxv64i1( %a, %v } -declare @llvm.vp.select.nxv8i7(, , , i32) - define @select_nxv8i7( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i7: ; CHECK: # %bb.0: @@ -118,8 +102,6 @@ define @select_nxv8i7( %a, ret %v } -declare @llvm.vp.select.nxv1i8(, , , i32) - define @select_nxv1i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i8: ; CHECK: # %bb.0: @@ -130,8 +112,6 @@ define @select_nxv1i8( %a, ret %v } -declare @llvm.vp.select.nxv2i8(, , , i32) - define @select_nxv2i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i8: ; CHECK: # %bb.0: @@ -142,8 +122,6 @@ define @select_nxv2i8( %a, ret %v } -declare @llvm.vp.select.nxv4i8(, , , i32) - define @select_nxv4i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i8: ; CHECK: # %bb.0: @@ -154,8 +132,6 @@ define @select_nxv4i8( %a, ret %v } -declare @llvm.vp.select.nxv8i8(, , , i32) - define @select_nxv8i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i8: ; CHECK: # %bb.0: @@ -166,8 +142,6 @@ define @select_nxv8i8( %a, ret %v } -declare 
@llvm.vp.select.nxv14i8(, , , i32) - define @select_nxv14i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv14i8: ; CHECK: # %bb.0: @@ -178,8 +152,6 @@ define @select_nxv14i8( %a, %v } -declare @llvm.vp.select.nxv16i8(, , , i32) - define @select_nxv16i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i8: ; CHECK: # %bb.0: @@ -190,8 +162,6 @@ define @select_nxv16i8( %a, %v } -declare @llvm.vp.select.nxv32i8(, , , i32) - define @select_nxv32i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i8: ; CHECK: # %bb.0: @@ -202,8 +172,6 @@ define @select_nxv32i8( %a, %v } -declare @llvm.vp.select.nxv64i8(, , , i32) - define @select_nxv64i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i8: ; CHECK: # %bb.0: @@ -214,8 +182,6 @@ define @select_nxv64i8( %a, %v } -declare @llvm.vp.select.nxv1i16(, , , i32) - define @select_nxv1i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i16: ; CHECK: # %bb.0: @@ -226,8 +192,6 @@ define @select_nxv1i16( %a, %v } -declare @llvm.vp.select.nxv2i16(, , , i32) - define @select_nxv2i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i16: ; CHECK: # %bb.0: @@ -238,8 +202,6 @@ define @select_nxv2i16( %a, %v } -declare @llvm.vp.select.nxv4i16(, , , i32) - define @select_nxv4i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i16: ; CHECK: # %bb.0: @@ -250,8 +212,6 @@ define @select_nxv4i16( %a, %v } -declare @llvm.vp.select.nxv8i16(, , , i32) - define @select_nxv8i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i16: ; CHECK: # %bb.0: @@ -262,8 +222,6 @@ define @select_nxv8i16( %a, %v } -declare @llvm.vp.select.nxv16i16(, , , i32) - define @select_nxv16i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i16: ; CHECK: # %bb.0: @@ -274,8 +232,6 @@ define @select_nxv16i16( %a, %v } -declare @llvm.vp.select.nxv32i16(, , , i32) - define @select_nxv32i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i16: ; CHECK: # %bb.0: @@ -286,8 
+242,6 @@ define @select_nxv32i16( %a, %v } -declare @llvm.vp.select.nxv1i32(, , , i32) - define @select_nxv1i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i32: ; CHECK: # %bb.0: @@ -298,8 +252,6 @@ define @select_nxv1i32( %a, %v } -declare @llvm.vp.select.nxv2i32(, , , i32) - define @select_nxv2i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i32: ; CHECK: # %bb.0: @@ -310,8 +262,6 @@ define @select_nxv2i32( %a, %v } -declare @llvm.vp.select.nxv4i32(, , , i32) - define @select_nxv4i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i32: ; CHECK: # %bb.0: @@ -322,8 +272,6 @@ define @select_nxv4i32( %a, %v } -declare @llvm.vp.select.nxv8i32(, , , i32) - define @select_nxv8i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i32: ; CHECK: # %bb.0: @@ -334,8 +282,6 @@ define @select_nxv8i32( %a, %v } -declare @llvm.vp.select.nxv16i32(, , , i32) - define @select_nxv16i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i32: ; CHECK: # %bb.0: @@ -346,8 +292,6 @@ define @select_nxv16i32( %a, %v } -declare @llvm.vp.select.nxv32i32(, , , i32) - define @select_nxv32i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i32: ; CHECK: # %bb.0: @@ -395,8 +339,6 @@ define @select_nxv32i32( %a, %v } -declare i32 @llvm.vscale.i32() - define @select_evl_nxv32i32( %a, %b, %c) { ; RV32-LABEL: select_evl_nxv32i32: ; RV32: # %bb.0: @@ -467,8 +409,6 @@ define @select_evl_nxv32i32( %a, %v } -declare @llvm.vp.select.nxv1i64(, , , i32) - define @select_nxv1i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i64: ; CHECK: # %bb.0: @@ -479,8 +419,6 @@ define @select_nxv1i64( %a, %v } -declare @llvm.vp.select.nxv2i64(, , , i32) - define @select_nxv2i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i64: ; CHECK: # %bb.0: @@ -513,8 +451,6 @@ define @select_nxv2i64_constant_false( %a, < ret %v } -declare @llvm.vp.select.nxv4i64(, , , i32) - define @select_nxv4i64( %a, %b, %c, i32 zeroext %evl) { ; 
CHECK-LABEL: select_nxv4i64: ; CHECK: # %bb.0: @@ -525,8 +461,6 @@ define @select_nxv4i64( %a, %v } -declare @llvm.vp.select.nxv8i64(, , , i32) - define @select_nxv8i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i64: ; CHECK: # %bb.0: @@ -537,8 +471,6 @@ define @select_nxv8i64( %a, %v } -declare @llvm.vp.select.nxv1f16(, , , i32) - define @select_nxv1f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f16: ; CHECK: # %bb.0: @@ -549,8 +481,6 @@ define @select_nxv1f16( %a, %v } -declare @llvm.vp.select.nxv2f16(, , , i32) - define @select_nxv2f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f16: ; CHECK: # %bb.0: @@ -561,8 +491,6 @@ define @select_nxv2f16( %a, %v } -declare @llvm.vp.select.nxv4f16(, , , i32) - define @select_nxv4f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f16: ; CHECK: # %bb.0: @@ -573,8 +501,6 @@ define @select_nxv4f16( %a, %v } -declare @llvm.vp.select.nxv8f16(, , , i32) - define @select_nxv8f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f16: ; CHECK: # %bb.0: @@ -585,8 +511,6 @@ define @select_nxv8f16( %a, %v } -declare @llvm.vp.select.nxv16f16(, , , i32) - define @select_nxv16f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f16: ; CHECK: # %bb.0: @@ -597,8 +521,6 @@ define @select_nxv16f16( %a, %v } -declare @llvm.vp.select.nxv32f16(, , , i32) - define @select_nxv32f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32f16: ; CHECK: # %bb.0: @@ -609,8 +531,6 @@ define @select_nxv32f16( %a, %v } -declare @llvm.vp.select.nxv1f32(, , , i32) - define @select_nxv1f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f32: ; CHECK: # %bb.0: @@ -621,8 +541,6 @@ define @select_nxv1f32( %a, %v } -declare @llvm.vp.select.nxv2f32(, , , i32) - define @select_nxv2f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f32: ; CHECK: # %bb.0: @@ -633,8 +551,6 @@ define @select_nxv2f32( %a, %v } -declare @llvm.vp.select.nxv4f32(, , , i32) - define 
@select_nxv4f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f32: ; CHECK: # %bb.0: @@ -645,8 +561,6 @@ define @select_nxv4f32( %a, %v } -declare @llvm.vp.select.nxv8f32(, , , i32) - define @select_nxv8f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f32: ; CHECK: # %bb.0: @@ -657,8 +571,6 @@ define @select_nxv8f32( %a, %v } -declare @llvm.vp.select.nxv16f32(, , , i32) - define @select_nxv16f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f32: ; CHECK: # %bb.0: @@ -669,8 +581,6 @@ define @select_nxv16f32( %a, %v } -declare @llvm.vp.select.nxv1f64(, , , i32) - define @select_nxv1f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f64: ; CHECK: # %bb.0: @@ -681,8 +591,6 @@ define @select_nxv1f64( %a, %v } -declare @llvm.vp.select.nxv2f64(, , , i32) - define @select_nxv2f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f64: ; CHECK: # %bb.0: @@ -693,8 +601,6 @@ define @select_nxv2f64( %a, %v } -declare @llvm.vp.select.nxv4f64(, , , i32) - define @select_nxv4f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f64: ; CHECK: # %bb.0: @@ -705,8 +611,6 @@ define @select_nxv4f64( %a, %v } -declare @llvm.vp.select.nxv8f64(, , , i32) - define @select_nxv8f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f64: ; CHECK: # %bb.0: @@ -717,8 +621,6 @@ define @select_nxv8f64( %a, %v } -declare @llvm.vp.select.nxv16f64(, , , i32) - define @select_nxv16f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll index 27d76bf41912e..a76427dfa3e96 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.riscv.vsetvli( - i64, i64, i64); - define signext i32 
@vsetvl_sext() { ; CHECK-LABEL: vsetvl_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll index 4fed1c2cd0522..71710432b7e42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll @@ -2,19 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs -O0 < %s | FileCheck %s -declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare i64 @llvm.riscv.vsetvlimax(i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - i64, i64) -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - i64, i64) - define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind { ; CHECK-LABEL: fixed_length: ; CHECK: # %bb.0: # %entry @@ -49,7 +36,6 @@ entry: ret %2 } - define @intrinsic_same_vlmax( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_vlmax: ; CHECK: # %bb.0: # %entry @@ -77,7 +63,6 @@ entry: ret %2 } - define @intrinsic_same_avl_imm( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_avl_imm: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll index 05b76ec7733bb..60a291402b551 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -5,21 +5,6 @@ ; The following tests check whether inserting VSETVLI avoids inserting ; unneeded vsetvlis across basic blocks. 
-declare i64 @llvm.riscv.vsetvli(i64, i64, i64) - -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , , i64, i64) -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , , i64, i64) - -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , , i64, i64) - -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , , i64, i64) - -declare @llvm.riscv.vfmv.v.f.nxv1f64.f64(, double, i64) -declare @llvm.riscv.vfmv.v.f.nxv2f32.f32( , float, i64) - -declare void @llvm.riscv.vse.nxv1f64(, ptr nocapture, i64) -declare void @llvm.riscv.vse.nxv2f32(, ptr nocapture, i64) - define @test1(i64 %avl, i8 zeroext %cond, %a, %b) nounwind { ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry @@ -533,11 +518,6 @@ for.end: ; preds = %for.body, %entry ret void } -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) -declare @llvm.riscv.vle.nxv16f32.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfmacc.nxv16f32.f32.i64(, float, , i64, i64, i64) -declare void @llvm.riscv.vse.nxv16f32.i64(, ptr nocapture, i64) - ; We need a vsetvli in the last block because the predecessors have different ; VTYPEs. The AVL is the same and the SEW/LMUL ratio implies the same VLMAX so ; we don't need to read AVL and can keep VL unchanged. @@ -570,10 +550,6 @@ if.end: %e = call @llvm.riscv.vadd.nxv2i32( poison, %a, %d, i64 %vl) ret %e } -declare @llvm.riscv.vle.nxv2i32(, ptr, i64) -declare @llvm.riscv.vle.nxv2i16(, ptr, i64) -declare @llvm.riscv.vwadd.nxv2i32(, , i16, i64) -declare @llvm.riscv.vadd.nxv2i32(, , , i64) ; We can use X0, X0 vsetvli in if2 and if2.end. The merge point as if.end will ; see two different vtypes with the same SEW/LMUL ratio. At if2.end we will only @@ -625,7 +601,6 @@ if2.end: %h = call @llvm.riscv.vadd.nxv2i32( poison, %g, %w, i64 %vl) ret %h } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , , i64) ; We should only need 1 vsetvli for this code. 
define void @vlmax(i64 %N, ptr %c, ptr %a, ptr %b) { @@ -1018,18 +993,6 @@ exit: ret void } -declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64) -declare @llvm.riscv.vle.nxv1f64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(, , , i64, i64) -declare void @llvm.riscv.vse.nxv1f64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - ; Normally a pseudo's AVL is already live in its block, so it will already be ; live where we're inserting the vsetvli, before the pseudo. In some cases the ; AVL can be from a predecessor block, so make sure we extend its live range diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir index e09fc1828fec5..a35100654432c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -46,8 +46,6 @@ ret void } - declare i64 @llvm.riscv.vmv.x.s.nxv1i64() #1 - define i64 @vmv_x_s(i8 zeroext %cond, %0, %1, i64 %2) #0 { entry: %tobool = icmp eq i8 %cond, 0 @@ -67,8 +65,6 @@ ret i64 %d } - declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2 - define @vsetvli_add_or_sub(i8 zeroext %cond, %0, %1, i64 %avl) #0 { entry: %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) @@ -146,27 +142,7 @@ ret void } - declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) - - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) #3 - - declare @llvm.riscv.vle.nxv1i32.i64(, ptr nocapture, i64) #3 - - declare void @llvm.riscv.vse.nxv1i64.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 - - declare @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(, , i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } 
- attributes #3 = { nounwind readonly } - attributes #4 = { nounwind writeonly } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll index 2293a1e6979f4..5b56bfc535b75 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -2,19 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v -verify-machineinstrs -O2 < %s | FileCheck %s --check-prefixes=CHECK,NODEPVL ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v,+vl-dependent-latency -verify-machineinstrs -O2 < %s | FileCheck %s --check-prefixes=CHECK,DEPVL -declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare i64 @llvm.riscv.vsetvlimax(i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - i64, i64) -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - i64, i64) - define @test1(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry @@ -95,8 +82,6 @@ entry: %b = call @llvm.riscv.vmand.nxv1i1.i64( %a, %2, i64 %vl) ret %b } -declare @llvm.riscv.vmseq.nxv1i64.i64(, , i64) -declare @llvm.riscv.vmand.nxv1i1.i64(, , i64) ; Make sure we don't insert a vsetvli for the vmor instruction. 
define void @test6(ptr nocapture readonly %A, ptr nocapture %B, i64 %n) { @@ -300,7 +285,6 @@ entry: ret %f2 } - @gdouble = external global double define @test16(i64 %avl, double %a, %b) nounwind { @@ -346,7 +330,6 @@ entry: ret double %c3 } - define @test18( %a, double %b) nounwind { ; CHECK-LABEL: test18: ; CHECK: # %bb.0: # %entry @@ -431,7 +414,6 @@ entry: ret i64 %vl } - ; %vl is intentionally used only once define void @avl_forward3( %v, ptr %p, i64 %reg) nounwind { ; CHECK-LABEL: avl_forward3: @@ -529,12 +511,6 @@ entry: ret %5 } -declare { , i64 } @llvm.riscv.vleff.nxv1i64.i64( - , ptr nocapture, i64) - -declare @llvm.riscv.vmseq.nxv1i64.i64.i64( - , i64, i64) - ; Ensure AVL register is alive when forwarding an AVL immediate that does not fit in 5 bits define @avl_forward5(ptr %addr) { ; CHECK-LABEL: avl_forward5: @@ -549,8 +525,6 @@ define @avl_forward5(ptr %addr) { ret %ret } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(, , , i64, i64) - define @test20(i64 %avl, %a, %b, %c) nounwind { ; CHECK-LABEL: test20: ; CHECK: # %bb.0: # %entry @@ -590,7 +564,6 @@ bb: ret i64 %tmp2 } - define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) { ; CHECK-LABEL: add_v128i8: ; CHECK: # %bb.0: @@ -649,55 +622,6 @@ define dso_local @int_reduction_vmv_s_x(i32 signext %0, %6 } -declare @llvm.riscv.vfmv.s.f.nxv8f32.i64(, float, i64) -declare @llvm.vector.extract.nxv2f32.nxv8f32(, i64) -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(, , , i64, i64) - -declare @llvm.riscv.vmv.s.x.nxv8i32.i64(, i32, i64) #1 -declare @llvm.vector.extract.nxv2i32.nxv8i32(, i64 immarg) #2 -declare @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(, , , i64) #1 - -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -declare @llvm.riscv.vadd.nxv1i64.i64.i64( - , - , - i64, - i64); - -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - , - , - i64, - i64, - i64); - -declare @llvm.riscv.vmv.s.x.nxv1i64( - , - i64, - i64); - -declare @llvm.riscv.vfmv.s.f.nxv1f64 - (, - 
double, - i64) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) -declare @llvm.riscv.vle.nxv2i32.i64(, ptr nocapture, i64) -declare @llvm.riscv.vmslt.nxv2i32.i32.i64(, i32, i64) -declare @llvm.riscv.vmsgt.nxv2i32.i32.i64(, i32, i64) -declare @llvm.riscv.vmor.nxv2i1.i64(, , i64) -declare void @llvm.riscv.vse.mask.nxv2i32.i64(, ptr nocapture, , i64) -declare void @llvm.riscv.vse.nxv2i32.i64(, ptr nocapture, i64) - define @avl_undef1(, , ) { ; CHECK-LABEL: avl_undef1: ; CHECK: # %bb.0: @@ -814,7 +738,6 @@ entry: ret %2 } - define @vmv.v.x_vl1() nounwind { ; NODEPVL-LABEL: vmv.v.x_vl1: ; NODEPVL: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir index 396ca517e4017..6e6b708dad694 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -27,8 +27,6 @@ ret %b } - declare i64 @llvm.riscv.vmv.x.s.nxv1i64() #1 - define i64 @vmv_x_s( %0) #0 { entry: %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( %0) @@ -43,16 +41,12 @@ ret void } - declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2 - define i64 @vreduce_add_v2i64(ptr %x) #0 { %v = load <2 x i64>, ptr %x, align 16 %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v) ret i64 %red } - declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 - define @vsetvli_add( %0, %1, i64 %avl) #0 { entry: %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) @@ -112,19 +106,7 @@ ret void } - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vle.nxv1i32.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nounwind readnone } - attributes #2 = { nofree nosync nounwind readnone willreturn } - attributes #3 = { nounwind } - attributes #4 = { nounwind readonly } ... 
--- diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll index a4d372d0b5a62..3e8c9006a9d2b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll @@ -10,9 +10,6 @@ ; RUN: -riscv-v-vector-bits-max=128 -verify-machineinstrs \ ; RUN: | FileCheck %s --check-prefixes=CHECK,VLEN128 -declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen) -declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen) - define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind { ; CHECK-LABEL: test_vsetvli_e8m1: ; CHECK: # %bb.0: @@ -102,8 +99,6 @@ define void @test_vsetvlimax_e32m2_nouse() nounwind { ret void } -declare @llvm.riscv.vle.nxv4i32.iXLen(, ptr, iXLen) - ; Check that we remove the redundant vsetvli when followed by another operation define @redundant_vsetvli(iXLen %avl, ptr %ptr) nounwind { ; CHECK-LABEL: redundant_vsetvli: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll index f92f5e934f9f4..2000cd81157a9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll @@ -31,9 +31,6 @@ entry: ret void } -declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfmv.s.f.nxv4f16.i64(, half, i64) - define void @bar(half %y, ptr %i32p) { ; CHECK-NO-FELEN64-LABEL: bar: ; CHECK-NO-FELEN64: # %bb.0: # %entry @@ -61,4 +58,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv2i32.i64(, ptr nocapture, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll index 09162b55c7079..c64201462ad11 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 
@llvm.riscv.vsetvlimax(i64, i64); - define signext i32 @vsetvlmax_sext() { ; CHECK-LABEL: vsetvlmax_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll index 04aed5d81db99..43fde84600de7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.sext.nxv2i16.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vsext_nxv2i1_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vsext_nxv2i1_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll index eec2a5f3efcfb..07411b1c7ae08 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.sext.nxv2i16.nxv2i8(, , i32) - define @vsext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vsext_nxv2i8_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i8(, , i32) - define @vsext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vsext_nxv2i8_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i8(, 
, i32) - define @vsext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define @vsext_nxv2i8_nxv2i64_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i16(, , i32) - define @vsext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define @vsext_nxv2i16_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i16(, , i32) - define @vsext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define @vsext_nxv2i16_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i32(, , i32) - define @vsext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define @vsext_nxv2i32_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv32i32.nxv32i8(, , i32) - define @vsext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext.ll b/llvm/test/CodeGen/RISCV/rvv/vsext.ll index fdc394189bce5..d7ebc209bd870 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i8( - , - , - iXLen); - 
define @intrinsic_vsext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i16( - , - , - 
iXLen); - define @intrinsic_vsext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i32.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i32.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsext.nxv2i32.nxv2i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i32.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i32.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i32.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i32.nxv16i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: 
ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -664,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -685,13 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -708,11 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -729,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -752,11 
+548,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i32.nxv1i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -773,13 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -796,11 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i32.nxv2i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -817,13 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i32.nxv4i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -861,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -884,11 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i32.nxv8i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -905,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # 
%entry @@ -928,11 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i32.nxv16i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -949,13 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -972,11 +708,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i16.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1016,11 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i16.nxv2i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1060,11 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i16.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1081,13 +788,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1104,11 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i16.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1125,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1148,11 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i16.nxv16i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1169,13 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1192,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv32i16.nxv32i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1213,13 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll index 9674b78ab9fa0..a571ff577079f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll index b45a768b9ce22..b9ae90b8efd42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll index ff51acc2f19aa..861211194b588 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll index f5c46aec86b86..b335974a7b9f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.shl.nxv8i7(, , , i32) - define @vsll_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i7: ; CHECK: # %bb.0: @@ -21,8 +19,6 @@ define @vsll_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.shl.nxv1i8(, , , i32) - define @vsll_vv_nxv1i8( %va, %b, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i8: ; CHECK: # %bb.0: @@ -87,8 +83,6 @@ define @vsll_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv2i8(, , , i32) - define @vsll_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i8: ; CHECK: # %bb.0: @@ -153,8 +147,6 @@ define @vsll_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv4i8(, , , i32) - define @vsll_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +211,6 @@ define @vsll_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv5i8(, , , i32) - define @vsll_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv5i8: ; CHECK: # %bb.0: @@ -231,8 +221,6 @@ define @vsll_vv_nxv5i8( %va, %v } -declare @llvm.vp.shl.nxv8i8(, , , i32) - define @vsll_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i8: ; CHECK: # %bb.0: @@ -297,8 +285,6 @@ define @vsll_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv16i8(, , , i32) - define @vsll_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i8: ; CHECK: # %bb.0: @@ -363,8 +349,6 @@ define @vsll_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv32i8(, , , i32) - define @vsll_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i8: ; CHECK: # %bb.0: @@ -429,8 +413,6 @@ define @vsll_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv64i8(, , , i32) - define @vsll_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv64i8: ; CHECK: # %bb.0: @@ -495,8 +477,6 @@ define @vsll_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv1i16(, , , i32) - define @vsll_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i16: ; CHECK: # %bb.0: @@ -561,8 +541,6 @@ define @vsll_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i16(, , , i32) - define @vsll_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vsll_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,8 +605,6 @@ define @vsll_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i16(, , , i32) - define @vsll_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i16: ; CHECK: # %bb.0: @@ -693,8 +669,6 @@ define @vsll_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i16(, , , i32) - define @vsll_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i16: ; CHECK: # %bb.0: @@ -759,8 +733,6 @@ define @vsll_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv16i16(, , , i32) - define @vsll_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i16: ; CHECK: # %bb.0: @@ -825,8 +797,6 @@ define @vsll_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv32i16(, , , i32) - define @vsll_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i16: ; CHECK: # %bb.0: @@ -891,8 +861,6 @@ define @vsll_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv1i32(, , , i32) - define @vsll_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i32: ; CHECK: # %bb.0: @@ -957,8 +925,6 @@ define @vsll_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i32(, , , i32) - define @vsll_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1023,8 +989,6 @@ define @vsll_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i32(, , , i32) - define @vsll_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1089,8 +1053,6 @@ define @vsll_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i32(, , , i32) - define @vsll_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1155,8 +1117,6 @@ define @vsll_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv16i32(, , , i32) - define @vsll_vv_nxv16i32( %va, %b, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1221,8 +1181,6 @@ define @vsll_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv1i64(, , , i32) - define @vsll_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1299,8 +1257,6 @@ define @vsll_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i64(, , , i32) - define @vsll_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1377,8 +1333,6 @@ define @vsll_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i64(, , , i32) - define @vsll_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1455,8 +1409,6 @@ define @vsll_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i64(, , , i32) - define @vsll_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll index ec16e58f6e57d..3de3447f009ec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll @@ -31,8 +31,6 @@ define @vsitofp_nxv2bf16_nxv2i1_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i1(, , i32) - define @vsitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define @vsitofp_nxv2f16_nxv2i1_unmasked( %v ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i1(, , i32) - define @vsitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define @vsitofp_nxv2f32_nxv2i1_unmasked( % ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i1(, , i32) - define @vsitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll index f69ae3d560ef7..7f96da141c363 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -20,8 +20,6 @@ define @vsitofp_nxv2bf16_nxv2i7( %va, %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i8(, , i32) - define @vsitofp_nxv2bf16_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define @vsitofp_nxv2bf16_nxv2i8_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i16(, , i32) - define @vsitofp_nxv2bf16_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @vsitofp_nxv2bf16_nxv2i16_unmasked( %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i32(, , i32) - define @vsitofp_nxv2bf16_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: @@ -100,8 +94,6 @@ define @vsitofp_nxv2bf16_nxv2i32_unmasked( %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i64(, , i32) - define @vsitofp_nxv2bf16_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vsitofp_nxv2bf16_nxv2i64_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i7(, , i32) - define @vsitofp_nxv2f16_nxv2i7( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i7: ; ZVFH: # %bb.0: @@ -152,8 +142,6 @@ define @vsitofp_nxv2f16_nxv2i7( %va, %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i8(, , i32) - define @vsitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i8: ; ZVFH: # %bb.0: @@ -194,8 +182,6 @@ define @vsitofp_nxv2f16_nxv2i8_unmasked( %v ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i16(, , i32) - define @vsitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i16: ; ZVFH: # %bb.0: @@ -232,8 +218,6 @@ define @vsitofp_nxv2f16_nxv2i16_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i32(, , i32) - define 
@vsitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i32: ; ZVFH: # %bb.0: @@ -272,8 +256,6 @@ define @vsitofp_nxv2f16_nxv2i32_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i64(, , i32) - define @vsitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i64: ; ZVFH: # %bb.0: @@ -314,8 +296,6 @@ define @vsitofp_nxv2f16_nxv2i64_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i8(, , i32) - define @vsitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -338,8 +318,6 @@ define @vsitofp_nxv2f32_nxv2i8_unmasked( % ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i16(, , i32) - define @vsitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -362,8 +340,6 @@ define @vsitofp_nxv2f32_nxv2i16_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i32(, , i32) - define @vsitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -384,8 +360,6 @@ define @vsitofp_nxv2f32_nxv2i32_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i64(, , i32) - define @vsitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -408,8 +382,6 @@ define @vsitofp_nxv2f32_nxv2i64_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i8(, , i32) - define @vsitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -432,8 +404,6 @@ define @vsitofp_nxv2f64_nxv2i8_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i16(, , i32) - define @vsitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -456,8 +426,6 @@ define @vsitofp_nxv2f64_nxv2i16_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i32(, , i32) - define @vsitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsitofp_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -480,8 +448,6 @@ define @vsitofp_nxv2f64_nxv2i32_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i64(, , i32) - define @vsitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -502,8 +468,6 @@ define @vsitofp_nxv2f64_nxv2i64_unmasked( %v } -declare @llvm.vp.sitofp.nxv32f16.nxv32i32(, , i32) - define @vsitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32: ; ZVFH: # %bb.0: @@ -560,8 +524,6 @@ define @vsitofp_nxv32f16_nxv32i32( %va, ret %v } -declare @llvm.vp.sitofp.nxv32f32.nxv32i32(, , i32) - define @vsitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll index dd1d2df1236ff..f6580164a2203 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll @@ -11,12 +11,6 @@ ; RUN: -mattr=+zve64x,+zvl64b -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64 -declare @llvm.riscv.vslide1down.nxv1i64.i64( - , - , - i64, - i32) - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll index 0d8a4e827530f..8f5c8efd2d070 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vslide1down.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1down.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i32.i32( - , - 
, - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -881,14 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -917,12 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -984,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i64.i64( - , - , - i64, - iXLen) 
- define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1015,14 +729,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1051,12 +757,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1082,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll index 161d3dd021600..c389db39b9f07 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll @@ -11,12 +11,6 @@ ; RUN: -mattr=+zve64x,+zvl64b -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64 -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - i32) - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll index df19707180a82..d30110d43a53d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll 
@@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vslide1up.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -603,14 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -628,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -651,14 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -676,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -699,14 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -747,14 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -772,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -795,14 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -820,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -843,14 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -936,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -968,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1004,12 +724,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1104,14 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll index 22a90cc2c94ac..40d68e6d76727 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vslidedown.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -86,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -250,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -274,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -332,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16i8( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -356,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -414,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv32i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -438,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv32i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -496,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -520,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -602,13 +497,6 @@ entry: ret %a } -declare 
@llvm.riscv.vslidedown.mask.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -660,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -684,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -742,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -766,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -824,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -848,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -906,13 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -930,13 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -988,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1070,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1094,13 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1152,13 +956,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i32( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +973,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1234,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1258,13 +1041,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1316,13 +1092,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1340,13 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1398,13 +1160,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1177,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslidedown.mask.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1480,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1504,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1562,13 +1296,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1586,13 +1313,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1364,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1668,13 +1381,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1726,13 +1432,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1750,13 +1449,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1808,13 +1500,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1832,13 +1517,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1890,13 +1568,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1914,13 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1972,13 +1636,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f32( - , - , - iXLen, - iXLen, - iXLen); - 
define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1996,13 +1653,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -2054,13 +1704,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2078,13 +1721,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2136,13 +1772,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2160,13 +1789,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2218,13 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2242,13 +1857,6 @@ entry: 
ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2300,13 +1908,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2324,13 +1925,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2382,13 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -2406,13 +1993,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll index 0291207ccdb49..a2595e8652fd5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vslideup.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -86,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -250,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -274,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -332,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -356,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -414,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv32i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -438,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv32i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -496,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -520,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i16( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -602,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -660,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -684,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -742,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -766,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -824,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -848,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16i16( - , 
- , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -906,13 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -930,13 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -988,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1070,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1094,13 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1152,13 +956,6 @@ 
entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +973,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1234,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1258,13 +1041,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1316,13 +1092,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1340,13 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1398,13 +1160,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -1422,13 +1177,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1480,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1504,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1562,13 +1296,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1586,13 +1313,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1364,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1668,13 +1381,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1726,13 +1432,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1750,13 +1449,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1808,13 +1500,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1832,13 +1517,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1890,13 +1568,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1914,13 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1972,13 +1636,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f32( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1996,13 +1653,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -2054,13 +1704,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2078,13 +1721,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2136,13 +1772,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2160,13 +1789,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2218,13 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2242,13 +1857,6 @@ entry: ret %a } -declare 
@llvm.riscv.vslideup.mask.nxv1f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2300,13 +1908,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2324,13 +1925,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2382,13 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -2406,13 +1993,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll.ll b/llvm/test/CodeGen/RISCV/rvv/vsll.ll index 90fbfc9a1557b..7d899dcd0ba4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsll.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsll.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsll.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 
@@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ 
-1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8( - , - , - iXLen, - , - 
iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, 
iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry 
@@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsll.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll index 1f5341e2a332a..5aae41c2e437f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare void @llvm.riscv.vsm.nxv1i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv1i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv2i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv2i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv4i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv4i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv8i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv8i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv16i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv16i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsm.nxv32i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv32i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv64i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv64i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -95,11 +81,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - ; Make sure we can use the vsetvli from the producing instruction. define void @test_vsetvli_i16( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: @@ -117,11 +98,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define void @test_vsetvli_i32( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll index 17d59682c104f..fa8075e7b967a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm3c.nxv8i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vsm3c_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3c_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vsm3c.nxv16i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vsm3c_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3c_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll index 313482f8c6229..418d23e1c91cf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm3me.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsm3me_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3me_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsm3me.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsm3me_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3me_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll index 34ee021715f89..a3abf6af0bde8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm4k.nxv4i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4k.nxv8i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4k.nxv16i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll index bcea335deefa7..007421ed87746 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \ ; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm4r.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll index 0b56a54f08a8c..0606823162521 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll @@ -8,12 +8,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul -declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -31,13 +25,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -79,13 +60,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -152,12 +113,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -200,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -223,13 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -248,12 +183,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -271,13 +200,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -296,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -345,12 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -393,12 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -416,13 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -441,12 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -512,13 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -537,12 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -560,13 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -585,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -608,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -634,12 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -657,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -682,12 +500,6 
@@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -705,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -730,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -753,13 +552,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -778,12 +570,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -801,13 +587,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -826,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -849,13 +622,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -875,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -898,13 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -923,12 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -971,12 +711,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,13 +728,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1019,12 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1068,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1091,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1116,12 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1139,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i8.i8( - , - , - i8, - iXLen, iXLen) 
- define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1187,13 +869,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1212,12 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1235,13 +904,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1260,12 +922,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1283,13 +939,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1308,12 +957,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1331,13 +974,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1356,12 +992,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv64i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1379,13 +1009,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1404,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1427,13 +1044,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1452,12 +1062,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1475,13 +1079,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1500,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i16.i16( - , - , 
- i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1523,13 +1114,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,12 +1132,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1571,13 +1149,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1596,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1619,13 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1644,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1667,13 +1219,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsmul.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1692,12 +1237,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1715,13 +1254,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1740,12 +1272,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1763,13 +1289,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1788,12 +1307,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1811,13 +1324,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: 
; CHECK: # %bb.0: # %entry @@ -1836,12 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1859,13 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1884,12 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1907,13 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1932,12 +1412,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1968,13 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2006,12 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2042,13 +1503,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2080,12 +1534,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2116,13 +1564,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2154,12 +1595,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2190,13 +1625,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll index f3ad06529210a..949d9be1ce176 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 
@@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll index 89222711d4d91..a559c8bcd705a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 
+2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void 
} -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4880,12 +3502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4902,13 +3518,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4926,12 +3535,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4948,13 +3551,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4972,12 +3568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4994,13 +3584,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5018,12 +3601,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5040,13 +3617,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5064,12 +3634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5086,13 +3650,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll index 6b54ce4974f34..9890fc20021e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; 
CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define 
void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 
6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: 
# %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6427,7 +5656,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6450,7 +5678,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6473,7 +5700,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6496,7 +5722,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6519,7 +5744,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6542,7 +5766,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6565,7 +5788,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6588,7 +5810,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6611,7 +5832,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6634,7 +5854,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6657,7 +5876,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6680,7 +5898,6 @@ entry: ret void 
} - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6703,7 +5920,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -6726,7 +5942,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -6749,7 +5964,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -6772,7 +5986,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6795,7 +6008,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6818,7 +6030,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-6841,7 +6052,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6864,7 +6074,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6887,7 +6096,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6910,7 +6118,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6933,7 +6140,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6956,7 +6162,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6979,7 +6184,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: 
# %entry @@ -7002,7 +6206,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7025,7 +6228,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7048,7 +6250,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7071,7 +6272,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7094,7 +6294,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7117,7 +6316,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,7 +6338,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -7163,7 +6360,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7186,7 +6382,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7209,7 +6404,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7232,7 +6426,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7255,7 +6448,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7278,7 +6470,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7301,7 +6492,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7324,7 +6514,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7347,7 +6536,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7370,7 +6558,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7393,7 +6580,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7416,7 +6602,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7439,7 +6624,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7462,7 +6646,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7485,7 +6668,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7508,7 +6690,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7531,7 +6712,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7554,7 +6734,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7577,7 +6756,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7600,7 +6778,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7623,7 +6800,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7646,7 +6822,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7669,7 +6844,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7692,7 +6866,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7715,7 +6888,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7738,7 +6910,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7761,7 +6932,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7784,7 +6954,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7807,7 +6976,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7830,7 +6998,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7853,7 +7020,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7876,7 +7042,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7899,7 +7064,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7922,7 +7086,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7945,7 +7108,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7130,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7991,7 +7152,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8014,7 +7174,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8037,7 +7196,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8060,7 +7218,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8083,7 +7240,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8106,7 +7262,6 @@ entry: ret void } - define void 
@test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8129,7 +7284,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8152,7 +7306,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8175,7 +7328,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8198,7 +7350,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8221,7 +7372,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8244,7 +7394,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8267,7 +7416,6 @@ entry: ret void } - 
define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8290,7 +7438,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8313,7 +7460,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8336,7 +7482,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8359,7 +7504,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8382,7 +7526,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8405,7 +7548,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8428,7 
+7570,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8451,7 +7592,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8474,7 +7614,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8497,7 +7636,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8520,7 +7658,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8543,7 +7680,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8566,7 +7702,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # 
%entry @@ -8589,7 +7724,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8612,7 +7746,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8635,7 +7768,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8658,7 +7790,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8681,7 +7812,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8704,7 +7834,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8727,7 +7856,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -8750,7 +7878,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8773,7 +7900,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8796,7 +7922,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8819,7 +7944,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8842,7 +7966,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8865,7 +7988,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8888,7 +8010,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8911,7 +8032,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8934,7 +8054,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8957,7 +8076,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8980,7 +8098,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9003,7 +8120,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9026,7 +8142,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9049,7 +8164,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9072,7 +8186,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9095,7 +8208,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9118,7 +8230,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9141,7 +8252,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9164,7 +8274,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9187,7 +8296,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8318,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9233,7 +8340,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9256,7 +8362,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9279,7 +8384,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9302,7 +8406,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9325,7 +8428,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9348,7 +8450,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9371,7 +8472,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9394,7 +8494,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9417,7 +8516,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9440,7 +8538,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9463,7 +8560,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9486,7 +8582,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9509,7 +8604,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9532,7 +8626,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9555,7 +8648,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9578,7 +8670,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9601,7 +8692,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9624,7 +8714,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9647,7 +8736,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9670,7 +8758,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9693,7 +8780,6 @@ entry: ret 
void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9716,7 +8802,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9739,7 +8824,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9762,7 +8846,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9785,7 +8868,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9808,7 +8890,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8912,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9854,7 
+8934,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9877,7 +8956,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9900,7 +8978,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9923,7 +9000,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9946,7 +9022,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9969,7 +9044,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9992,7 +9066,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -10015,7 +9088,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10038,7 +9110,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10061,7 +9132,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9154,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10107,7 +9176,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10130,7 +9198,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10153,7 +9220,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -10176,7 +9242,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10199,7 +9264,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10222,7 +9286,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10245,7 +9308,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10268,7 +9330,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10291,7 +9352,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10314,7 +9374,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10337,7 +9396,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10360,7 +9418,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10383,7 +9440,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10406,7 +9462,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10429,7 +9484,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9506,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10475,7 +9528,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10498,7 +9550,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10521,7 +9572,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10544,7 +9594,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10567,7 +9616,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10590,7 +9638,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10613,7 +9660,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10636,7 +9682,6 @@ entry: ret void } - define void 
@test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10659,7 +9704,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10682,7 +9726,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9748,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10728,7 +9770,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10751,7 +9792,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10774,7 +9814,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10797,7 
+9836,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10820,7 +9858,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10843,7 +9880,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10866,7 +9902,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10889,7 +9924,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10912,7 +9946,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10935,7 +9968,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10958,7 +9990,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10981,7 +10012,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11004,7 +10034,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11027,7 +10056,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11050,7 +10078,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10100,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11096,7 +10122,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11119,7 +10144,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11142,7 +10166,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11165,7 +10188,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11188,7 +10210,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11211,7 +10232,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11234,7 +10254,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11257,7 +10276,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11280,7 +10298,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11303,7 +10320,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10342,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11349,7 +10364,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11372,7 +10386,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11395,7 +10408,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11418,7 +10430,6 @@ entry: ret void } - define void 
@test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11441,7 +10452,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11464,7 +10474,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11487,7 +10496,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11510,7 +10518,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11533,7 +10540,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11556,7 +10562,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11579,7 
+10584,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11602,7 +10606,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11625,7 +10628,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11648,7 +10650,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11671,7 +10672,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10694,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11717,7 +10716,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11740,7 +10738,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11763,7 +10760,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11786,7 +10782,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11809,7 +10804,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11832,7 +10826,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11855,7 +10848,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll index 70fb9c2b348d3..a63b6d7ed7efc 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", 
, 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: 
# %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", 
, 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: 
# %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", 
, 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: 
# %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: 
# %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6427,9 +5656,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6452,9 +5678,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6477,9 +5700,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6502,9 +5722,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5744,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6552,9 +5766,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6577,9 +5788,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6602,9 +5810,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6627,9 +5832,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6652,9 +5854,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6677,9 +5876,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6702,9 +5898,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6727,9 +5920,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6752,9 +5942,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6777,9 +5964,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6802,9 +5986,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6827,9 +6008,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6852,9 +6030,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6877,9 +6052,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6902,9 +6074,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6927,9 +6096,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6952,9 +6118,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6977,9 +6140,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7002,9 +6162,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7027,9 +6184,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7052,9 +6206,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7077,9 +6228,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7102,9 +6250,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7127,9 +6272,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7152,9 +6294,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7177,9 +6316,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7202,9 +6338,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7227,9 +6360,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6382,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7277,9 +6404,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7302,9 +6426,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7327,9 +6448,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7352,9 +6470,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7377,9 +6492,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7402,9 +6514,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7427,9 +6536,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7452,9 +6558,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7477,9 +6580,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7502,9 +6602,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7527,9 +6624,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7552,9 +6646,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7577,9 +6668,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7602,9 +6690,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7627,9 +6712,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7652,9 +6734,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7677,9 +6756,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7702,9 +6778,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7727,9 +6800,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7752,9 +6822,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7777,9 +6844,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7802,9 +6866,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7827,9 +6888,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7852,9 +6910,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7877,9 +6932,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7902,9 +6954,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7927,9 +6976,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7952,9 +6998,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7977,9 +7020,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8002,9 +7042,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8027,9 +7064,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8052,9 +7086,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8077,9 +7108,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8102,9 +7130,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8127,9 +7152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8152,9 +7174,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8177,9 +7196,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8202,9 +7218,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8227,9 +7240,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8252,9 +7262,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8277,9 +7284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8302,9 +7306,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8327,9 +7328,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8352,9 +7350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8377,9 +7372,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8402,9 +7394,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8427,9 +7416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8452,7 +7438,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8475,7 +7460,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8498,7 +7482,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8521,7 +7504,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8544,7 +7526,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8567,7 +7548,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8590,7 +7570,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8613,7 +7592,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8636,7 +7614,6 @@ entry: ret void } 
- define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8659,7 +7636,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8682,7 +7658,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8705,7 +7680,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8728,7 +7702,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7724,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8774,7 +7746,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8797,7 +7768,6 @@ 
entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -8820,7 +7790,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -8843,7 +7812,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -8866,7 +7834,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -8889,7 +7856,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8912,7 +7878,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8935,7 +7900,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -8958,7 +7922,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8981,7 +7944,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9004,7 +7966,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9027,7 +7988,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9050,7 +8010,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9073,7 +8032,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9096,7 +8054,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9119,7 +8076,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9142,7 +8098,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9165,7 +8120,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9188,7 +8142,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9211,7 +8164,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9234,7 +8186,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9257,7 +8208,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9280,7 +8230,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9303,7 +8252,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9326,7 +8274,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9349,7 +8296,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8318,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9395,7 +8340,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9418,7 +8362,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9441,7 +8384,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9464,7 +8406,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9487,7 +8428,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9510,7 +8450,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9533,7 +8472,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9556,7 +8494,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9579,7 +8516,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9602,7 +8538,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9625,7 +8560,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9648,7 +8582,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9671,7 +8604,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9694,7 +8626,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9717,7 +8648,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9740,7 +8670,6 @@ entry: ret void 
} - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9763,7 +8692,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9786,7 +8714,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9809,7 +8736,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9832,7 +8758,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9855,7 +8780,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9878,7 +8802,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9901,7 +8824,6 @@ 
entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9924,7 +8846,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9947,7 +8868,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9970,7 +8890,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9993,7 +8912,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10016,7 +8934,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10039,7 +8956,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-10062,7 +8978,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10085,7 +9000,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10108,7 +9022,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10131,7 +9044,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10154,7 +9066,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10177,7 +9088,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10200,7 +9110,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -10223,7 +9132,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10246,7 +9154,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10269,7 +9176,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10292,7 +9198,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10315,7 +9220,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10338,7 +9242,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10361,7 +9264,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10384,7 +9286,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10407,7 +9308,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10430,7 +9330,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10453,7 +9352,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10476,7 +9374,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10499,7 +9396,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10522,7 +9418,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10545,7 +9440,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10568,7 +9462,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10591,7 +9484,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9506,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10637,7 +9528,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10660,7 +9550,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10683,7 +9572,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10706,7 +9594,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10729,7 +9616,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10752,7 +9638,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10775,7 +9660,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10798,7 +9682,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10821,7 +9704,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10844,7 +9726,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10867,7 +9748,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10890,7 +9770,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10913,7 +9792,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10936,7 +9814,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10959,7 +9836,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10982,7 +9858,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11005,7 +9880,6 @@ 
entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11028,7 +9902,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11051,7 +9924,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11074,7 +9946,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11097,7 +9968,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11120,7 +9990,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11143,7 +10012,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -11166,7 +10034,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11189,7 +10056,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11212,7 +10078,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10100,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11258,7 +10122,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11281,7 +10144,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11304,7 +10166,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11327,7 +10188,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11350,7 +10210,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11373,7 +10232,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11396,7 +10254,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11419,7 +10276,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11442,7 +10298,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11465,7 +10320,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11488,7 +10342,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11511,7 +10364,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11534,7 +10386,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11557,7 +10408,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11580,7 +10430,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11603,7 +10452,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11626,7 +10474,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11649,7 +10496,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11672,7 +10518,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11695,7 +10540,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11718,7 +10562,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11741,7 +10584,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11764,7 +10606,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11787,7 +10628,6 @@ 
entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11810,7 +10650,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11833,7 +10672,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10694,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11879,7 +10716,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11902,7 +10738,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11925,7 +10760,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # 
%entry @@ -11948,7 +10782,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11971,7 +10804,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11994,7 +10826,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12017,7 +10848,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12040,7 +10870,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12063,7 +10892,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12086,7 +10914,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12109,7 +10936,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12132,7 +10958,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12155,7 +10980,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12178,7 +11002,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12201,7 +11024,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12224,7 +11046,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12247,7 +11068,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12270,7 +11090,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12293,7 +11112,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12316,7 +11134,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12339,7 +11156,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12362,7 +11178,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12385,7 +11200,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12408,7 +11222,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12431,7 +11244,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12454,7 +11266,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11288,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12500,7 +11310,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12523,7 +11332,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12546,7 +11354,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12569,7 +11376,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12592,7 +11398,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12615,7 +11420,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12638,7 +11442,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12661,7 +11464,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12684,7 +11486,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12707,7 +11508,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12730,7 +11530,6 
@@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12753,7 +11552,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12776,7 +11574,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12799,7 +11596,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12822,7 +11618,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12845,7 +11640,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12868,7 +11662,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -12891,7 +11684,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12914,7 +11706,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12937,7 +11728,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12960,7 +11750,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12983,7 +11772,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13006,7 +11794,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13029,7 +11816,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13052,7 +11838,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13075,7 +11860,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13098,7 +11882,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13121,7 +11904,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13144,7 +11926,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13167,7 +11948,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13190,7 +11970,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13213,7 +11992,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13236,7 +12014,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13259,7 +12036,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13282,7 +12058,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13305,7 +12080,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13328,7 +12102,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13351,7 +12124,6 @@ entry: ret void } - define void 
@test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13374,7 +12146,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13397,7 +12168,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13420,7 +12190,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13443,7 +12212,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13466,7 +12234,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13489,7 +12256,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13512,7 
+12278,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13535,7 +12300,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13558,7 +12322,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13581,7 +12344,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13604,7 +12366,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13627,7 +12388,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13650,7 +12410,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13673,7 +12432,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -13696,7 +12454,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12476,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -13742,7 +12498,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13765,7 +12520,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -13788,7 +12542,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -13811,7 +12564,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -13834,7 +12586,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13857,7 +12608,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13880,7 +12630,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13903,7 +12652,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13926,7 +12674,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13949,7 +12696,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-13972,7 +12718,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13995,7 +12740,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14018,7 +12762,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14041,7 +12784,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14064,7 +12806,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14087,7 +12828,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14110,7 +12850,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14133,7 +12872,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14156,7 +12894,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14179,7 +12916,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14202,7 +12938,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14225,7 +12960,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14248,7 +12982,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14271,7 +13004,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14294,7 +13026,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14317,7 +13048,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14340,7 +13070,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14363,7 +13092,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14386,7 +13114,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14409,7 +13136,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14432,7 +13158,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14455,7 +13180,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14478,7 +13202,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14501,7 +13224,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14524,7 +13246,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14547,7 +13268,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14570,7 +13290,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-14593,7 +13312,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14616,7 +13334,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14639,7 +13356,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14662,7 +13378,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14685,7 +13400,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14708,7 +13422,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14731,7 +13444,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14754,7 +13466,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14777,7 +13488,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14800,7 +13510,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14823,7 +13532,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14846,7 +13554,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14869,7 +13576,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14892,7 +13598,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14915,7 +13620,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14938,7 +13642,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14961,7 +13664,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14984,7 +13686,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15007,7 +13708,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15030,7 +13730,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15053,7 +13752,6 @@ entry: ret void } - define void 
@test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15076,7 +13774,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15099,7 +13796,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15122,7 +13818,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15145,7 +13840,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15168,7 +13862,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15191,7 +13884,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15214,7 
+13906,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15237,7 +13928,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15260,7 +13950,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15283,7 +13972,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15306,7 +13994,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15329,7 +14016,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15352,7 +14038,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15375,7 +14060,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15398,7 +14082,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15421,7 +14104,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15444,7 +14126,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15467,7 +14148,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15490,7 +14170,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15513,7 +14192,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15536,7 +14214,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15559,7 +14236,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15582,7 +14258,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15605,7 +14280,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15628,7 +14302,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15651,7 +14324,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll index 
b83f8f0779255..c4b4d4f6b328d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -956,4 +956,3 @@ define @vsra_vv_nxv1i8_sext_zext_mixed_trunc( %vd = call @llvm.vp.trunc.nxv1i8.nxvi16( %vc, %m, i32 %evl) ret %vd } -declare @llvm.vp.trunc.nxv1i8.nxvi16(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll index 961689b15b839..58d6759b34947 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.ashr.nxv8i7(, , , i32) - define @vsra_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vsra_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.ashr.nxv1i8(, , , i32) - define @vsra_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i8: ; CHECK: # %bb.0: @@ -89,8 +85,6 @@ define @vsra_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv2i8(, , , i32) - define @vsra_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i8: ; CHECK: # %bb.0: @@ -155,8 +149,6 @@ define @vsra_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv4i8(, , , i32) - define @vsra_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i8: ; CHECK: # %bb.0: @@ -221,8 +213,6 @@ define @vsra_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv8i8(, , , i32) - define @vsra_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i8: ; CHECK: # %bb.0: @@ -287,8 +277,6 @@ define @vsra_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv16i8(, , , i32) - define @vsra_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i8: ; CHECK: # %bb.0: @@ -353,8 +341,6 @@ define 
@vsra_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv32i8(, , , i32) - define @vsra_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i8: ; CHECK: # %bb.0: @@ -419,8 +405,6 @@ define @vsra_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv64i8(, , , i32) - define @vsra_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv64i8: ; CHECK: # %bb.0: @@ -485,8 +469,6 @@ define @vsra_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv1i16(, , , i32) - define @vsra_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -551,8 +533,6 @@ define @vsra_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i16(, , , i32) - define @vsra_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i16: ; CHECK: # %bb.0: @@ -617,8 +597,6 @@ define @vsra_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i16(, , , i32) - define @vsra_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i16: ; CHECK: # %bb.0: @@ -683,8 +661,6 @@ define @vsra_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv8i16(, , , i32) - define @vsra_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i16: ; CHECK: # %bb.0: @@ -749,8 +725,6 @@ define @vsra_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv16i16(, , , i32) - define @vsra_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i16: ; CHECK: # %bb.0: @@ -815,8 +789,6 @@ define @vsra_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv32i16(, , , i32) - define @vsra_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i16: ; CHECK: # %bb.0: @@ -881,8 +853,6 @@ define @vsra_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i32: ; CHECK: # %bb.0: @@ 
-947,8 +917,6 @@ define @vsra_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i32(, , , i32) - define @vsra_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1013,8 +981,6 @@ define @vsra_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i32(, , , i32) - define @vsra_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1079,8 +1045,6 @@ define @vsra_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv8i32(, , , i32) - define @vsra_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1145,8 +1109,6 @@ define @vsra_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv16i32(, , , i32) - define @vsra_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1211,8 +1173,6 @@ define @vsra_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1289,8 +1249,6 @@ define @vsra_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i64(, , , i32) - define @vsra_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1367,8 +1325,6 @@ define @vsra_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i64(, , , i32) - define @vsra_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1445,8 +1401,6 @@ define @vsra_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv6i64(, , , i32) - define @vsra_vv_nxv6i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv6i64: ; CHECK: # %bb.0: @@ -1457,8 +1411,6 @@ define @vsra_vv_nxv6i64( %va, %v } -declare @llvm.vp.ashr.nxv8i64(, , , i32) - define @vsra_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra.ll b/llvm/test/CodeGen/RISCV/rvv/vsra.ll index e0e0500f6c1ae..601956c8de444 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsra.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 
+103,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a 
} -declare @llvm.riscv.vsra.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32: ; 
CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsra.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i16( - , - , - iXLen, - iXLen); - 
define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( 
%0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll index f52c02d5d935a..b57f0bee21f5a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.lshr.nxv8i7(, , , i32) - define @vsrl_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vsrl_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.lshr.nxv1i8(, , , i32) - define @vsrl_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i8: ; CHECK: # %bb.0: @@ -88,8 +84,6 @@ define @vsrl_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv2i8(, , , i32) - define 
@vsrl_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i8: ; CHECK: # %bb.0: @@ -154,8 +148,6 @@ define @vsrl_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv4i8(, , , i32) - define @vsrl_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define @vsrl_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv8i8(, , , i32) - define @vsrl_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i8: ; CHECK: # %bb.0: @@ -286,8 +276,6 @@ define @vsrl_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv16i8(, , , i32) - define @vsrl_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i8: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define @vsrl_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv32i8(, , , i32) - define @vsrl_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i8: ; CHECK: # %bb.0: @@ -418,8 +404,6 @@ define @vsrl_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv64i8(, , , i32) - define @vsrl_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv64i8: ; CHECK: # %bb.0: @@ -484,8 +468,6 @@ define @vsrl_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv1i16(, , , i32) - define @vsrl_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i16: ; CHECK: # %bb.0: @@ -550,8 +532,6 @@ define @vsrl_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv2i16(, , , i32) - define @vsrl_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i16: ; CHECK: # %bb.0: @@ -616,8 +596,6 @@ define @vsrl_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i16(, , , i32) - define @vsrl_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i16: ; CHECK: # %bb.0: @@ -682,8 +660,6 @@ define @vsrl_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv8i16(, 
, , i32) - define @vsrl_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i16: ; CHECK: # %bb.0: @@ -748,8 +724,6 @@ define @vsrl_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv16i16(, , , i32) - define @vsrl_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i16: ; CHECK: # %bb.0: @@ -814,8 +788,6 @@ define @vsrl_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv32i16(, , , i32) - define @vsrl_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i16: ; CHECK: # %bb.0: @@ -880,8 +852,6 @@ define @vsrl_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv1i32(, , , i32) - define @vsrl_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i32: ; CHECK: # %bb.0: @@ -946,8 +916,6 @@ define @vsrl_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv2i32(, , , i32) - define @vsrl_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1012,8 +980,6 @@ define @vsrl_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i32(, , , i32) - define @vsrl_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1078,8 +1044,6 @@ define @vsrl_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv8i32(, , , i32) - define @vsrl_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1144,8 +1108,6 @@ define @vsrl_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv16i32(, , , i32) - define @vsrl_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1210,8 +1172,6 @@ define @vsrl_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv1i64(, , , i32) - define @vsrl_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1288,8 +1248,6 @@ define @vsrl_vi_nxv1i64_unmasked( %va, i32 ret %v } 
-declare @llvm.vp.lshr.nxv2i64(, , , i32) - define @vsrl_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1366,8 +1324,6 @@ define @vsrl_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i64(, , , i32) - define @vsrl_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1444,8 +1400,6 @@ define @vsrl_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv5i64(, , , i32) - define @vsrl_vv_nxv5i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv5i64: ; CHECK: # %bb.0: @@ -1456,8 +1410,6 @@ define @vsrl_vv_nxv5i64( %va, %v } -declare @llvm.vp.lshr.nxv8i64(, , , i32) - define @vsrl_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll index acc5322e0ecb7..bd4a1d1280a33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ 
-569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: 
ret %a } -declare @llvm.riscv.vsrl.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - 
define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsse.ll index 770e06749c348..f11fb6a6613de 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: ; 
CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -779,13 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, ptr 
%1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i16( - , - 
ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ 
-1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1493,12 +1077,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1515,13 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1539,12 +1110,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1561,13 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1585,12 +1143,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1607,13 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1631,12 +1176,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1653,13 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1677,12 +1209,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1699,13 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1723,12 +1242,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1745,13 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1769,12 +1275,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1791,13 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1815,12 +1308,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1837,13 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1861,12 +1341,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1883,13 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen); - define 
void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1907,12 +1374,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1929,13 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1953,12 +1407,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1975,13 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll index 7b80d45a924d3..71e959b84b560 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void 
@test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void 
@test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void 
@test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void 
@test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void 
@test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void 
@test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void 
@test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void 
@test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # 
%bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: 
; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, 
ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void 
@test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void @test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll index 880066bf45990..420d04837965c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -38,9 +35,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -63,9 +57,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -88,9 +79,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -113,9 +101,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -138,9 +123,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -163,9 +145,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -188,9 +167,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -213,9 +189,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -238,9 +211,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -263,9 +233,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -335,9 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -360,9 +321,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -385,9 +343,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -410,9 +365,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -435,9 +387,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -471,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -496,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -521,9 +464,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -546,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -582,9 +519,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -607,9 +541,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -632,9 +563,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -657,9 +585,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -693,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -718,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -743,9 +662,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -768,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -804,9 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -829,9 +739,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -854,9 +761,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -879,8 +783,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -903,8 +805,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -927,8 +827,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -951,8 +849,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -975,8 
+871,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -999,8 +893,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1023,8 +915,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1047,8 +937,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1071,8 +959,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1095,8 +981,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1119,8 +1003,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1143,8 +1025,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1167,8 +1047,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1191,8 +1069,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1215,8 +1091,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1239,8 +1113,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1135,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1287,8 +1157,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1311,8 +1179,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1335,8 +1201,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1359,8 +1223,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1383,8 +1245,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1407,8 +1267,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1431,8 +1289,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1455,8 +1311,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1479,8 +1333,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1503,8 +1355,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1377,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1551,8 +1399,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1421,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1599,8 +1443,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1623,8 +1465,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1647,8 +1487,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1671,8 +1509,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1695,8 +1531,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1719,8 +1553,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1743,8 +1575,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1767,8 +1597,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1791,8 +1619,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1815,8 +1641,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1839,8 +1663,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1863,8 +1685,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1707,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1911,8 +1729,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1935,8 +1751,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1959,8 +1773,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1983,8 +1795,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2007,8 +1817,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2031,8 +1839,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2055,8 +1861,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2079,8 +1883,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2103,8 +1905,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2127,8 +1927,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2151,8 +1949,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2175,7 +1971,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2198,7 +1993,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2221,7 +2015,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2244,7 +2037,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2267,7 +2059,6 @@ entry: ret void } - define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2290,7 +2081,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2313,7 +2103,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2336,7 +2125,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2359,7 +2147,6 @@ entry: ret void } - define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2382,7 +2169,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2405,7 +2191,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2428,7 +2213,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2451,7 +2235,6 @@ entry: ret void } - define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2474,7 +2257,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2497,7 +2279,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2301,6 @@ entry: ret void } - define void 
@test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2543,7 +2323,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2566,7 +2345,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2589,7 +2367,6 @@ entry: ret void } - define void @test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2612,7 +2389,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2635,7 +2411,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2658,7 +2433,6 @@ entry: ret void } - define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2681,7 +2455,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: 
; CHECK: # %bb.0: # %entry @@ -2704,7 +2477,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2727,7 +2499,6 @@ entry: ret void } - define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2750,7 +2521,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2773,7 +2543,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2796,7 +2565,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2819,7 +2587,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2842,7 +2609,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2865,7 +2631,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr 
%base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2888,7 +2653,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2911,7 +2675,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2934,7 +2697,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2957,7 +2719,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2980,7 +2741,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3003,7 +2763,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3026,7 +2785,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3049,7 +2807,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3072,7 +2829,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2851,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3118,7 +2873,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3141,7 +2895,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3164,7 +2917,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3187,7 +2939,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3210,7 +2961,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3233,7 +2983,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3256,7 +3005,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3279,7 +3027,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3302,7 +3049,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3325,7 +3071,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3348,7 +3093,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3371,7 +3115,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3394,7 +3137,6 @@ entry: ret void } - define void 
@test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3417,7 +3159,6 @@ entry: ret void } - define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3440,7 +3181,6 @@ entry: ret void } - define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3463,7 +3203,6 @@ entry: ret void } - define void @test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3486,7 +3225,6 @@ entry: ret void } - define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3509,7 +3247,6 @@ entry: ret void } - define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3532,7 +3269,6 @@ entry: ret void } - define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3555,7 +3291,6 @@ entry: ret void } - define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3578,7 +3313,6 @@ entry: ret void } - define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3601,7 +3335,6 @@ entry: ret void } - define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3624,7 +3357,6 @@ entry: ret void } - define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3647,7 +3379,6 @@ entry: ret void } - define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3401,6 @@ entry: ret void } - define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3693,7 +3423,6 @@ entry: ret void } - define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3716,7 +3445,6 @@ entry: ret void } - define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3739,7 +3467,6 @@ entry: ret void } - define void 
@test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3762,7 +3489,6 @@ entry: ret void } - define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3511,6 @@ entry: ret void } - define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3808,7 +3533,6 @@ entry: ret void } - define void @test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3831,7 +3555,6 @@ entry: ret void } - define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3854,7 +3577,6 @@ entry: ret void } - define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3877,7 +3599,6 @@ entry: ret void } - define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3621,6 @@ entry: ret void } - define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3923,7 +3643,6 @@ entry: ret void } - define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3946,7 +3665,6 @@ entry: ret void } - define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3969,7 +3687,6 @@ entry: ret void } - define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll index 7fd1b05bb444d..d21a0498474af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry @@ 
-70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: 
# %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m4( %op1, %shift, i32 %vl) { ; 
CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) - define 
@test_vssra_vx_i32m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssra.nxv2i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define 
@test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.i32.i32(, , i32, , i32 
immarg, i32, i32 immarg) - define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } 
-declare @llvm.riscv.vssra.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2_m: 
; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m8_m( %mask, 
%op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(, , , , i32 
immarg, i32, i32 immarg) - define @test_vssra_vx_i64m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll index b7a84e58e6e61..47abf706562ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m1( %op1, i64 
%shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.i64.i64(, , i64, i64 immarg, i64) - define 
@test_vssra_vv_i16mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssra.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # 
%entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16mf4_m( %mask, 
%op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.i64.i64(, , i64, , i64 
immarg, i64, i64 immarg) - define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 
} -declare @llvm.riscv.vssra.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll index 0c2cdff65776e..58703158e58a0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -14,8 +14,6 @@ entry: ret %0 } 
-declare @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: 
ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: 
# %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32mf2( %op1, i32 %shift, i32 %vl) { ; 
CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.i32.i32(, , i32, i32 immarg, i32) - define 
@test_vssrl_vv_u64m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 
+638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define 
@test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: 
# %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll index fe80854bb2646..66308dc89844d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf4( %op1, 
i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.i64.i64(, , i64, i64 immarg, i64) - define 
@test_vssrl_vv_u8m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # 
%entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 
+662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define 
@test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; 
CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll index a0a583c046c49..2458312a397e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void 
@test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void 
@llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void 
@test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, 
i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void 
@test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 
5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 
+1234,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr 
%base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void 
@test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 
2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 
+1872,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: 
# %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void 
@test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void 
@test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void 
@test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll index bdd809841d2d6..30c8090325845 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void 
@test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void 
@test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void 
@test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), 
ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 
+1014,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr 
%base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void 
@test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 
+1542,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, 
ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void 
@test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # 
%bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void 
@test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll index 661eca171404f..837016bd41ac4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.ssub.sat.nxv1i8(, ) - define @ssub_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @ssub_nxv1i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i8(, ) - define @ssub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @ssub_nxv2i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i8(, ) - define @ssub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define @ssub_nxv4i8_vi( %va) { ret %v } -declare 
@llvm.ssub.sat.nxv8i8(, ) - define @ssub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define @ssub_nxv8i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i8(, ) - define @ssub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i8_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define @ssub_nxv16i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv32i8(, ) - define @ssub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i8_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define @ssub_nxv32i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv64i8(, ) - define @ssub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv64i8_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define @ssub_nxv64i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i16(, ) - define @ssub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define @ssub_nxv1i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i16(, ) - define @ssub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i16_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define @ssub_nxv2i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i16(, ) - define @ssub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i16_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define @ssub_nxv4i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i16(, ) - define @ssub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i16_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define @ssub_nxv8i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i16(, ) - define @ssub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i16_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define @ssub_nxv16i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv32i16(, ) - define @ssub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i16_vv: ; CHECK: # %bb.0: @@ -459,8 +433,6 @@ define @ssub_nxv32i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i32(, ) - define @ssub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i32_vv: ; CHECK: # %bb.0: @@ -494,8 +466,6 @@ define 
@ssub_nxv1i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i32(, ) - define @ssub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i32_vv: ; CHECK: # %bb.0: @@ -529,8 +499,6 @@ define @ssub_nxv2i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i32(, ) - define @ssub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i32_vv: ; CHECK: # %bb.0: @@ -564,8 +532,6 @@ define @ssub_nxv4i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i32(, ) - define @ssub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i32_vv: ; CHECK: # %bb.0: @@ -599,8 +565,6 @@ define @ssub_nxv8i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i32(, ) - define @ssub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i32_vv: ; CHECK: # %bb.0: @@ -634,8 +598,6 @@ define @ssub_nxv16i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i64(, ) - define @ssub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i64_vv: ; CHECK: # %bb.0: @@ -683,8 +645,6 @@ define @ssub_nxv1i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i64(, ) - define @ssub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i64_vv: ; CHECK: # %bb.0: @@ -732,8 +692,6 @@ define @ssub_nxv2i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i64(, ) - define @ssub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i64_vv: ; CHECK: # %bb.0: @@ -781,8 +739,6 @@ define @ssub_nxv4i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i64(, ) - define @ssub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll index ebf8d5eeb40bc..0ac2ef7e251c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.ssub.sat.nxv8i7(, , , i32) - define @vssub_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vx_nxv8i7: ; CHECK: # %bb.0: @@ -24,8 
+22,6 @@ define @vssub_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.ssub.sat.nxv1i8(, , , i32) - define @vssub_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i8: ; CHECK: # %bb.0: @@ -105,8 +101,6 @@ define @vssub_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv2i8(, , , i32) - define @vssub_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i8: ; CHECK: # %bb.0: @@ -173,8 +167,6 @@ define @vssub_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv3i8(, , , i32) - define @vssub_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv3i8: ; CHECK: # %bb.0: @@ -241,8 +233,6 @@ define @vssub_vi_nxv3i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv4i8(, , , i32) - define @vssub_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i8: ; CHECK: # %bb.0: @@ -309,8 +299,6 @@ define @vssub_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv8i8(, , , i32) - define @vssub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i8: ; CHECK: # %bb.0: @@ -377,8 +365,6 @@ define @vssub_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv16i8(, , , i32) - define @vssub_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i8: ; CHECK: # %bb.0: @@ -445,8 +431,6 @@ define @vssub_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv32i8(, , , i32) - define @vssub_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv32i8: ; CHECK: # %bb.0: @@ -513,8 +497,6 @@ define @vssub_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv64i8(, , , i32) - define @vssub_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv64i8: ; CHECK: # %bb.0: @@ -583,8 +565,6 @@ define @vssub_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.ssub.sat.nxv128i8(, , , i32) - define @vssub_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_nxv128i8: ; CHECK: # %bb.0: @@ -635,8 +615,6 @@ define @vssub_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i16(, , , i32) - define @vssub_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i16: ; CHECK: # %bb.0: @@ -703,8 +681,6 @@ define @vssub_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i16(, , , i32) - define @vssub_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i16: ; CHECK: # %bb.0: @@ -771,8 +747,6 @@ define @vssub_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i16(, , , i32) - define @vssub_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i16: ; CHECK: # %bb.0: @@ -839,8 +813,6 @@ define @vssub_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i16(, , , i32) - define @vssub_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i16: ; CHECK: # %bb.0: @@ -907,8 +879,6 @@ define @vssub_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv16i16(, , , i32) - define @vssub_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i16: ; CHECK: # %bb.0: @@ -975,8 +945,6 @@ define @vssub_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv32i16(, , , i32) - define @vssub_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1043,8 +1011,6 @@ define @vssub_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i32(, , , i32) - define @vssub_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1111,8 +1077,6 @@ define @vssub_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i32(, , , i32) - define @vssub_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i32: ; CHECK: # 
%bb.0: @@ -1179,8 +1143,6 @@ define @vssub_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i32(, , , i32) - define @vssub_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1247,8 +1209,6 @@ define @vssub_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i32(, , , i32) - define @vssub_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1315,8 +1275,6 @@ define @vssub_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv16i32(, , , i32) - define @vssub_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1385,8 +1343,6 @@ define @vssub_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.ssub.sat.nxv32i32(, , , i32) - define @vssub_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1438,8 +1394,6 @@ define @vssub_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i64(, , , i32) - define @vssub_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1534,8 +1488,6 @@ define @vssub_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i64(, , , i32) - define @vssub_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1630,8 +1582,6 @@ define @vssub_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i64(, , , i32) - define @vssub_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1726,8 +1676,6 @@ define @vssub_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i64(, , , i32) - define @vssub_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssub.ll index 0b00f6d801b4b..c0ae21f6e4025 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vssub.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } 
-declare @llvm.riscv.vssub.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vssub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i64.nxv4i64( - , - , - , - iXLen) - define 
@intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, 
%1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( 
%0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll index ac6ae6811ccde..3fa74ab285bb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.usub.sat.nxv1i8(, ) - define @usub_nxv1i8_vv( %va, 
%b) { ; CHECK-LABEL: usub_nxv1i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @usub_nxv1i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i8(, ) - define @usub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @usub_nxv2i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i8(, ) - define @usub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define @usub_nxv4i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i8(, ) - define @usub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define @usub_nxv8i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i8(, ) - define @usub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i8_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define @usub_nxv16i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv32i8(, ) - define @usub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i8_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define @usub_nxv32i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv64i8(, ) - define @usub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv64i8_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define @usub_nxv64i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i16(, ) - define @usub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define @usub_nxv1i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i16(, ) - define @usub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i16_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define @usub_nxv2i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i16(, ) - define @usub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i16_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define @usub_nxv4i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i16(, ) - define @usub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i16_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define @usub_nxv8i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i16(, ) - define 
@usub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i16_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define @usub_nxv16i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv32i16(, ) - define @usub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i16_vv: ; CHECK: # %bb.0: @@ -459,8 +433,6 @@ define @usub_nxv32i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i32(, ) - define @usub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i32_vv: ; CHECK: # %bb.0: @@ -494,8 +466,6 @@ define @usub_nxv1i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i32(, ) - define @usub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i32_vv: ; CHECK: # %bb.0: @@ -529,8 +499,6 @@ define @usub_nxv2i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i32(, ) - define @usub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i32_vv: ; CHECK: # %bb.0: @@ -564,8 +532,6 @@ define @usub_nxv4i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i32(, ) - define @usub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i32_vv: ; CHECK: # %bb.0: @@ -599,8 +565,6 @@ define @usub_nxv8i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i32(, ) - define @usub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i32_vv: ; CHECK: # %bb.0: @@ -634,8 +598,6 @@ define @usub_nxv16i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i64(, ) - define @usub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i64_vv: ; CHECK: # %bb.0: @@ -683,8 +645,6 @@ define @usub_nxv1i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i64(, ) - define @usub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i64_vv: ; CHECK: # %bb.0: @@ -732,8 +692,6 @@ define @usub_nxv2i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i64(, ) - define @usub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i64_vv: ; CHECK: # %bb.0: @@ -781,8 +739,6 @@ define @usub_nxv4i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i64(, ) - define @usub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll index d54901c93d53c..bde279a4d1f2b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.usub.sat.nxv8i7(, , , i32) - define @vssubu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vssubu_vx_nxv8i7( %a, i7 signext %b, ret %v } -declare @llvm.vp.usub.sat.nxv1i8(, , , i32) - define @vssubu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -103,8 +99,6 @@ define @vssubu_vi_nxv1i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv2i8(, , , i32) - define @vssubu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -171,8 +165,6 @@ define @vssubu_vi_nxv2i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv3i8(, , , i32) - define @vssubu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -239,8 +231,6 @@ define @vssubu_vi_nxv3i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv4i8(, , , i32) - define @vssubu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -307,8 +297,6 @@ define @vssubu_vi_nxv4i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv8i8(, , , i32) - define @vssubu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -375,8 +363,6 @@ define @vssubu_vi_nxv8i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv16i8(, , , i32) - define @vssubu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -443,8 +429,6 @@ define @vssubu_vi_nxv16i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv32i8(, , , i32) - 
define @vssubu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -511,8 +495,6 @@ define @vssubu_vi_nxv32i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv64i8(, , , i32) - define @vssubu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -581,8 +563,6 @@ define @vssubu_vi_nxv64i8_unmasked( %va, i3 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.usub.sat.nxv128i8(, , , i32) - define @vssubu_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_nxv128i8: ; CHECK: # %bb.0: @@ -633,8 +613,6 @@ define @vssubu_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i16(, , , i32) - define @vssubu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -701,8 +679,6 @@ define @vssubu_vi_nxv1i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i16(, , , i32) - define @vssubu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -769,8 +745,6 @@ define @vssubu_vi_nxv2i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i16(, , , i32) - define @vssubu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -837,8 +811,6 @@ define @vssubu_vi_nxv4i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i16(, , , i32) - define @vssubu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -905,8 +877,6 @@ define @vssubu_vi_nxv8i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv16i16(, , , i32) - define @vssubu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -973,8 +943,6 @@ define @vssubu_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv32i16(, , , i32) - define @vssubu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vssubu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1041,8 +1009,6 @@ define @vssubu_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i32(, , , i32) - define @vssubu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1109,8 +1075,6 @@ define @vssubu_vi_nxv1i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i32(, , , i32) - define @vssubu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1177,8 +1141,6 @@ define @vssubu_vi_nxv2i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i32(, , , i32) - define @vssubu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1245,8 +1207,6 @@ define @vssubu_vi_nxv4i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i32(, , , i32) - define @vssubu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1313,8 +1273,6 @@ define @vssubu_vi_nxv8i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv16i32(, , , i32) - define @vssubu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1383,8 +1341,6 @@ define @vssubu_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.usub.sat.nxv32i32(, , , i32) - define @vssubu_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1436,8 +1392,6 @@ define @vssubu_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i64(, , , i32) - define @vssubu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1532,8 +1486,6 @@ define @vssubu_vi_nxv1i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i64(, , , i32) - define @vssubu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1628,8 +1580,6 @@ define @vssubu_vi_nxv2i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i64(, , , i32) - define @vssubu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1724,8 +1674,6 @@ define @vssubu_vi_nxv4i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i64(, , , i32) - define @vssubu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll index 859329e005aff..699a2fd4f528a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ 
entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, 
- iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret 
%a } -declare @llvm.riscv.vssubu.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ 
-1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ 
entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare 
@llvm.riscv.vssubu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 
+1064,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll index 0207d0864aab4..3430e56b67eae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.sub.nxv2i1(, , , i32) - define @vsub_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vsub_vv_nxv2i1( %va, %v } -declare @llvm.vp.sub.nxv4i1(, , , i32) - define @vsub_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vsub_vv_nxv4i1( %va, %v } -declare @llvm.vp.sub.nxv8i1(, , , i32) - define @vsub_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vsub_vv_nxv8i1( %va, %v } -declare @llvm.vp.sub.nxv16i1(, , , i32) - define 
@vsub_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i1: ; CHECK: # %bb.0: @@ -53,8 +44,6 @@ define @vsub_vv_nxv16i1( %va, %v } -declare @llvm.vp.sub.nxv32i1(, , , i32) - define @vsub_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll index e28da6bc4ec64..92fbe88ae9333 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sub.nxv8i7(, , , i32) - define @vsub_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vsub_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.sub.nxv1i8(, , , i32) - define @vsub_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vsub_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv2i8(, , , i32) - define @vsub_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i8: ; CHECK: # %bb.0: @@ -110,8 +104,6 @@ define @vsub_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv4i8(, , , i32) - define @vsub_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i8: ; CHECK: # %bb.0: @@ -156,8 +148,6 @@ define @vsub_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv5i8(, , , i32) - define @vsub_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv5i8: ; CHECK: # %bb.0: @@ -202,8 +192,6 @@ define @vsub_vx_nxv5i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv8i8(, , , i32) - define @vsub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i8: ; CHECK: # %bb.0: @@ -248,8 +236,6 @@ define @vsub_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } 
-declare @llvm.vp.sub.nxv16i8(, , , i32) - define @vsub_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i8: ; CHECK: # %bb.0: @@ -294,8 +280,6 @@ define @vsub_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv32i8(, , , i32) - define @vsub_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i8: ; CHECK: # %bb.0: @@ -340,8 +324,6 @@ define @vsub_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv64i8(, , , i32) - define @vsub_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv64i8: ; CHECK: # %bb.0: @@ -386,8 +368,6 @@ define @vsub_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv1i16(, , , i32) - define @vsub_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i16: ; CHECK: # %bb.0: @@ -432,8 +412,6 @@ define @vsub_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv2i16(, , , i32) - define @vsub_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i16: ; CHECK: # %bb.0: @@ -478,8 +456,6 @@ define @vsub_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv4i16(, , , i32) - define @vsub_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i16: ; CHECK: # %bb.0: @@ -524,8 +500,6 @@ define @vsub_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv8i16(, , , i32) - define @vsub_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i16: ; CHECK: # %bb.0: @@ -570,8 +544,6 @@ define @vsub_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv16i16(, , , i32) - define @vsub_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i16: ; CHECK: # %bb.0: @@ -616,8 +588,6 @@ define @vsub_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.sub.nxv32i16(, , , i32) - define @vsub_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i16: ; CHECK: # %bb.0: @@ -662,8 +632,6 @@ define @vsub_vx_nxv32i16_unmasked( 
%va, i ret %v } -declare @llvm.vp.sub.nxv1i32(, , , i32) - define @vsub_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i32: ; CHECK: # %bb.0: @@ -708,8 +676,6 @@ define @vsub_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i32(, , , i32) - define @vsub_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i32: ; CHECK: # %bb.0: @@ -754,8 +720,6 @@ define @vsub_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i32(, , , i32) - define @vsub_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i32: ; CHECK: # %bb.0: @@ -800,8 +764,6 @@ define @vsub_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i32(, , , i32) - define @vsub_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i32: ; CHECK: # %bb.0: @@ -846,8 +808,6 @@ define @vsub_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i32(, , , i32) - define @vsub_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i32: ; CHECK: # %bb.0: @@ -892,8 +852,6 @@ define @vsub_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.sub.nxv1i64(, , , i32) - define @vsub_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i64: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vsub_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv2i64(, , , i32) - define @vsub_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1040,8 +996,6 @@ define @vsub_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv4i64(, , , i32) - define @vsub_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1114,8 +1068,6 @@ define @vsub_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv8i64(, , , i32) - define @vsub_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll index 6d41d9c2e1c4d..d5b445a4aa233 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsub.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsub.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # 
%bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: 
# %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 
@@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); 
- define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define 
@intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1848,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1882,13 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1918,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1952,13 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define 
@intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1988,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2022,13 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2058,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2092,13 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll index 6ba2b405c943e..851bb555116ed 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -779,13 
+565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 
@@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll index 69b1173d9531c..eb178cef9a08f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 
+2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void 
} -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4880,12 +3502,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4902,13 +3518,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4926,12 +3535,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4948,13 +3551,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4972,12 +3568,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4994,13 +3584,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5018,12 +3601,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5040,13 +3617,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5064,12 +3634,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5086,13 +3650,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll index 316c7ccb7e415..afbe2377acdcc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; 
CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define 
void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 
6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: 
# %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6427,7 +5656,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6450,7 +5678,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6473,7 +5700,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6496,7 +5722,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6519,7 +5744,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6542,7 +5766,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6565,7 +5788,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6588,7 +5810,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6611,7 +5832,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6634,7 +5854,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6657,7 +5876,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6680,7 +5898,6 @@ entry: ret void 
} - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6703,7 +5920,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -6726,7 +5942,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -6749,7 +5964,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -6772,7 +5986,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6795,7 +6008,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6818,7 +6030,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-6841,7 +6052,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6864,7 +6074,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6887,7 +6096,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6910,7 +6118,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6933,7 +6140,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6956,7 +6162,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6979,7 +6184,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: 
# %entry @@ -7002,7 +6206,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7025,7 +6228,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7048,7 +6250,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7071,7 +6272,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7094,7 +6294,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7117,7 +6316,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,7 +6338,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -7163,7 +6360,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7186,7 +6382,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7209,7 +6404,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7232,7 +6426,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7255,7 +6448,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7278,7 +6470,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7301,7 +6492,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7324,7 +6514,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7347,7 +6536,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7370,7 +6558,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7393,7 +6580,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7416,7 +6602,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7439,7 +6624,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7462,7 +6646,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7485,7 +6668,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7508,7 +6690,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7531,7 +6712,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7554,7 +6734,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7577,7 +6756,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7600,7 +6778,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7623,7 +6800,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7646,7 +6822,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7669,7 +6844,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7692,7 +6866,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7715,7 +6888,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7738,7 +6910,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7761,7 +6932,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7784,7 +6954,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7807,7 +6976,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7830,7 +6998,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7853,7 +7020,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7876,7 +7042,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7899,7 +7064,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7922,7 +7086,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7945,7 +7108,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7130,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7991,7 +7152,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8014,7 +7174,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8037,7 +7196,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8060,7 +7218,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8083,7 +7240,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8106,7 +7262,6 @@ entry: ret void } - define void 
@test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8129,7 +7284,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8152,7 +7306,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8175,7 +7328,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8198,7 +7350,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8221,7 +7372,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8244,7 +7394,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8267,7 +7416,6 @@ entry: ret void } - 
define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8290,7 +7438,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8313,7 +7460,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8336,7 +7482,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8359,7 +7504,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8382,7 +7526,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8405,7 +7548,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8428,7 
+7570,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8451,7 +7592,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8474,7 +7614,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8497,7 +7636,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8520,7 +7658,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8543,7 +7680,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8566,7 +7702,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # 
%entry @@ -8589,7 +7724,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8612,7 +7746,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8635,7 +7768,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8658,7 +7790,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8681,7 +7812,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8704,7 +7834,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8727,7 +7856,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -8750,7 +7878,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8773,7 +7900,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8796,7 +7922,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8819,7 +7944,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8842,7 +7966,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8865,7 +7988,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8888,7 +8010,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8911,7 +8032,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8934,7 +8054,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8957,7 +8076,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8980,7 +8098,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9003,7 +8120,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9026,7 +8142,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9049,7 +8164,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9072,7 +8186,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9095,7 +8208,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9118,7 +8230,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9141,7 +8252,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9164,7 +8274,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9187,7 +8296,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8318,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9233,7 +8340,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9256,7 +8362,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9279,7 +8384,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9302,7 +8406,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9325,7 +8428,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9348,7 +8450,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9371,7 +8472,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9394,7 +8494,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9417,7 +8516,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9440,7 +8538,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9463,7 +8560,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9486,7 +8582,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9509,7 +8604,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9532,7 +8626,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9555,7 +8648,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9578,7 +8670,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9601,7 +8692,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9624,7 +8714,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9647,7 +8736,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9670,7 +8758,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9693,7 +8780,6 @@ entry: ret 
void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9716,7 +8802,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9739,7 +8824,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9762,7 +8846,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9785,7 +8868,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9808,7 +8890,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8912,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9854,7 
+8934,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9877,7 +8956,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9900,7 +8978,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9923,7 +9000,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9946,7 +9022,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9969,7 +9044,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9992,7 +9066,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -10015,7 +9088,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10038,7 +9110,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10061,7 +9132,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9154,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10107,7 +9176,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10130,7 +9198,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10153,7 +9220,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -10176,7 +9242,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10199,7 +9264,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10222,7 +9286,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10245,7 +9308,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10268,7 +9330,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10291,7 +9352,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10314,7 +9374,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10337,7 +9396,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10360,7 +9418,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10383,7 +9440,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10406,7 +9462,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10429,7 +9484,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9506,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10475,7 +9528,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10498,7 +9550,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10521,7 +9572,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10544,7 +9594,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10567,7 +9616,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10590,7 +9638,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10613,7 +9660,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10636,7 +9682,6 @@ entry: ret void } - define void 
@test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10659,7 +9704,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10682,7 +9726,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9748,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10728,7 +9770,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10751,7 +9792,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10774,7 +9814,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10797,7 
+9836,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10820,7 +9858,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10843,7 +9880,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10866,7 +9902,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10889,7 +9924,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10912,7 +9946,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10935,7 +9968,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10958,7 +9990,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10981,7 +10012,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11004,7 +10034,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11027,7 +10056,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11050,7 +10078,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10100,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11096,7 +10122,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11119,7 +10144,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11142,7 +10166,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11165,7 +10188,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11188,7 +10210,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11211,7 +10232,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11234,7 +10254,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11257,7 +10276,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11280,7 +10298,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11303,7 +10320,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10342,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11349,7 +10364,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11372,7 +10386,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11395,7 +10408,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11418,7 +10430,6 @@ entry: ret void } - define void 
@test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11441,7 +10452,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11464,7 +10474,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11487,7 +10496,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11510,7 +10518,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11533,7 +10540,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11556,7 +10562,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11579,7 
+10584,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11602,7 +10606,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11625,7 +10628,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11648,7 +10650,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11671,7 +10672,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10694,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11717,7 +10716,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11740,7 +10738,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11763,7 +10760,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11786,7 +10782,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11809,7 +10804,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11832,7 +10826,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11855,7 +10848,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll index b297d33611242..a59b70a0319f8 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -38,9 +35,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -63,9 +57,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -88,9 +79,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -113,9 +101,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -138,9 +123,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -163,9 +145,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -188,9 +167,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -213,9 +189,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -238,9 +211,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -263,9 +233,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -288,9 +255,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -313,9 +277,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -338,9 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -363,9 +321,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -388,9 +343,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -413,9 +365,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -438,9 +387,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -463,9 +409,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -488,9 +431,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -513,9 +453,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -538,9 +475,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -574,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -599,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -624,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -649,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -674,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -699,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -724,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -749,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -774,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -799,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -824,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -849,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -874,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -899,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -924,9 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -949,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -974,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -999,9 +882,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1024,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1060,9 +937,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1085,9 +959,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1110,9 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1135,9 +1003,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1160,9 +1025,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1185,9 +1047,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1210,9 +1069,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1235,9 +1091,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1260,9 +1113,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", 
, 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1285,9 +1135,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1310,9 +1157,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1335,9 +1179,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1360,9 +1201,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1385,9 +1223,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1410,9 +1245,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1435,9 +1267,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1460,9 +1289,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1485,9 +1311,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1333,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1546,9 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1571,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1596,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1621,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: 
# %bb.0: # %entry @@ -1646,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1671,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1696,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1721,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1746,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1796,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1821,9 +1608,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1846,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1871,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1896,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", 
, 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1921,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1957,9 +1729,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1982,9 +1751,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2007,9 +1773,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1795,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2057,9 +1817,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2082,9 +1839,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: 
# %bb.0: # %entry @@ -2107,9 +1861,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2132,9 +1883,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2157,9 +1905,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2182,9 +1927,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2207,9 +1949,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2232,9 +1971,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2257,9 +1993,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2282,9 +2015,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2307,9 +2037,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2332,9 +2059,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2368,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", 
, 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2393,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2418,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2443,9 +2158,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2468,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2493,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2518,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2543,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: 
# %bb.0: # %entry @@ -2568,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2593,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2618,9 +2312,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2643,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2668,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2693,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2718,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2743,9 +2422,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2779,9 +2455,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2804,9 +2477,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2829,9 +2499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2854,9 +2521,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2879,9 +2543,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2904,9 +2565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2929,9 +2587,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2954,9 +2609,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2979,9 +2631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3004,9 +2653,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: 
# %bb.0: # %entry @@ -3029,9 +2675,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3054,9 +2697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3079,9 +2719,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3104,9 +2741,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3129,9 +2763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3154,9 +2785,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3179,9 +2807,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3204,9 +2829,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3229,9 +2851,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3254,9 +2873,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3304,9 +2917,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3329,9 +2939,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3354,9 +2961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3379,9 +2983,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3404,9 +3005,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3429,9 +3027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3454,9 +3049,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3479,9 +3071,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3504,9 +3093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3529,9 +3115,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3554,9 +3137,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3579,9 +3159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3604,9 +3181,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3629,9 +3203,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3654,9 +3225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3679,9 +3247,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3704,9 +3269,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3729,9 +3291,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3754,9 +3313,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3779,9 +3335,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3804,9 +3357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3829,9 +3379,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3854,9 +3401,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3879,9 +3423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3904,9 +3445,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3929,9 +3467,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3954,9 +3489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3979,9 +3511,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3533,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4029,9 +3555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4054,9 +3577,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4079,9 +3599,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4104,9 +3621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4129,9 +3643,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4154,9 +3665,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4179,9 +3687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4204,9 +3709,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4229,9 +3731,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4254,9 +3753,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4279,9 +3775,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4304,9 +3797,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4329,9 +3819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4354,9 +3841,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4379,9 +3863,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4404,9 +3885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4429,9 +3907,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4454,9 +3929,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4479,9 +3951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4504,9 +3973,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4529,9 +3995,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4554,9 +4017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4579,9 +4039,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4604,9 +4061,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4629,9 +4083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4654,9 +4105,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4679,9 +4127,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4704,9 +4149,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4171,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4754,9 +4193,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4779,9 +4215,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4804,9 +4237,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4829,9 +4259,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4854,9 +4281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4879,9 +4303,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4904,9 +4325,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4929,9 +4347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4954,9 +4369,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4979,9 +4391,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5004,9 +4413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5029,9 +4435,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5054,9 +4457,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5079,9 +4479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5104,9 +4501,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5129,9 +4523,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5154,9 +4545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5179,9 +4567,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5204,9 +4589,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5229,9 +4611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5254,9 +4633,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5279,9 +4655,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5304,9 +4677,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5329,9 +4699,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5354,9 +4721,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5379,9 +4743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5404,9 +4765,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5429,9 +4787,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5479,9 +4831,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5504,9 +4853,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5529,9 +4875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5554,9 +4897,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5579,9 +4919,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5604,9 +4941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5629,9 +4963,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5654,9 +4985,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5679,9 +5007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5704,9 +5029,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5729,9 +5051,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5754,9 +5073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5779,9 +5095,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5804,9 +5117,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5829,9 +5139,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5854,9 +5161,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5879,9 +5183,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5904,9 +5205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5929,9 +5227,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5954,9 +5249,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5979,9 +5271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6004,9 +5293,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6029,9 +5315,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6054,9 +5337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6079,9 +5359,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6104,9 +5381,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6129,9 +5403,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6154,9 +5425,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5447,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6204,9 +5469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6229,9 +5491,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6254,9 +5513,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6279,9 +5535,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6304,9 +5557,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6329,9 +5579,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6354,9 +5601,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6379,9 +5623,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6404,9 +5645,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6429,9 +5667,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6454,9 +5689,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6479,9 +5711,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6504,9 +5733,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6529,9 +5755,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6554,9 +5777,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6579,9 +5799,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6604,9 +5821,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6629,9 +5843,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6654,9 +5865,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6679,9 +5887,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6704,9 +5909,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6729,9 +5931,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6754,9 +5953,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6779,9 +5975,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6804,9 +5997,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6829,9 +6019,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6854,9 +6041,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6879,9 +6063,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6085,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6929,9 +6107,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6954,9 +6129,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6979,9 +6151,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7004,9 +6173,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7029,9 +6195,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7054,9 +6217,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7079,9 +6239,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7104,9 +6261,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7129,9 +6283,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7154,9 +6305,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7179,9 +6327,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7204,9 +6349,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7229,9 +6371,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7254,9 +6393,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7279,9 +6415,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7304,9 +6437,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7329,9 +6459,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7354,9 +6481,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7379,9 +6503,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7404,9 +6525,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7429,9 +6547,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7454,9 +6569,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7479,9 +6591,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7504,9 +6613,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7529,9 +6635,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7554,9 +6657,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7579,9 +6679,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7604,9 +6701,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7629,9 +6723,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7654,9 +6745,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7679,9 +6767,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7704,9 +6789,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7729,9 +6811,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7754,9 +6833,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7779,9 +6855,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7804,9 +6877,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7829,9 +6899,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7854,9 +6921,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7879,9 +6943,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7904,9 +6965,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7929,9 +6987,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7954,9 +7009,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7979,9 +7031,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8004,9 +7053,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8029,9 +7075,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8054,9 +7097,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8079,9 +7119,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8104,9 +7141,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8129,9 +7163,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8154,9 +7185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8179,9 +7207,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8204,9 +7229,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8229,9 +7251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8254,9 +7273,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8279,9 +7295,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8304,9 +7317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8329,9 +7339,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8354,9 +7361,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8379,9 +7383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8404,9 +7405,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8429,9 +7427,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8454,9 +7449,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8479,9 +7471,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8504,9 +7493,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8529,7 +7515,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8552,7 +7537,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8575,7 +7559,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8598,7 +7581,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8621,7 +7603,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8644,7 +7625,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8667,7 +7647,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8690,7 +7669,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8713,7 +7691,6 @@ entry: ret void } 
- define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8736,7 +7713,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8759,7 +7735,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8782,7 +7757,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7779,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8828,7 +7801,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8851,7 +7823,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8874,7 +7845,6 @@ 
entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -8897,7 +7867,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -8920,7 +7889,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -8943,7 +7911,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -8966,7 +7933,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8989,7 +7955,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9012,7 +7977,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -9035,7 +7999,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9058,7 +8021,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9081,7 +8043,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9104,7 +8065,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9127,7 +8087,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9150,7 +8109,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9173,7 +8131,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9196,7 +8153,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9219,7 +8175,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9242,7 +8197,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9265,7 +8219,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9288,7 +8241,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9311,7 +8263,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9334,7 +8285,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9357,7 +8307,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9380,7 +8329,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9403,7 +8351,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8373,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9449,7 +8395,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9472,7 +8417,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9495,7 +8439,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9518,7 +8461,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9541,7 +8483,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9564,7 +8505,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9587,7 +8527,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9610,7 +8549,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9633,7 +8571,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9656,7 +8593,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9679,7 +8615,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9702,7 +8637,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9725,7 +8659,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9748,7 +8681,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9771,7 +8703,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9794,7 +8725,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9817,7 +8747,6 @@ entry: ret void 
} - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9840,7 +8769,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9863,7 +8791,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9886,7 +8813,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9909,7 +8835,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9932,7 +8857,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9955,7 +8879,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9978,7 +8901,6 @@ 
entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10001,7 +8923,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10024,7 +8945,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +8967,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10070,7 +8989,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10093,7 +9011,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10116,7 +9033,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-10139,7 +9055,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10162,7 +9077,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10185,7 +9099,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10208,7 +9121,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10231,7 +9143,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10254,7 +9165,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10277,7 +9187,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -10300,7 +9209,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10323,7 +9231,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10346,7 +9253,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10369,7 +9275,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10392,7 +9297,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10415,7 +9319,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10438,7 +9341,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10461,7 +9363,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10484,7 +9385,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10507,7 +9407,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10530,7 +9429,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10553,7 +9451,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10576,7 +9473,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10599,7 +9495,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10622,7 +9517,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10645,7 +9539,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9561,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10691,7 +9583,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10714,7 +9605,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10737,7 +9627,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10760,7 +9649,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10783,7 +9671,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10806,7 +9693,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10829,7 +9715,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10852,7 +9737,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10875,7 +9759,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10898,7 +9781,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10921,7 +9803,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10944,7 +9825,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10967,7 +9847,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10990,7 +9869,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11013,7 +9891,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11036,7 +9913,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11059,7 +9935,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11082,7 +9957,6 @@ 
entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11105,7 +9979,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11128,7 +10001,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11151,7 +10023,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11174,7 +10045,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11197,7 +10067,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11220,7 +10089,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: 
# %entry @@ -11243,7 +10111,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11266,7 +10133,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10155,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11312,7 +10177,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11335,7 +10199,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11358,7 +10221,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11381,7 +10243,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11404,7 +10265,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11427,7 +10287,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11450,7 +10309,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11473,7 +10331,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11496,7 +10353,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11519,7 +10375,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11542,7 +10397,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11565,7 +10419,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11588,7 +10441,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11611,7 +10463,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11634,7 +10485,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11657,7 +10507,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11680,7 +10529,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11703,7 +10551,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11726,7 +10573,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11749,7 +10595,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11772,7 +10617,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11795,7 +10639,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11818,7 +10661,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11841,7 +10683,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11864,7 +10705,6 @@ 
entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11887,7 +10727,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10749,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11933,7 +10771,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11956,7 +10793,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11979,7 +10815,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12002,7 +10837,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # 
%entry @@ -12025,7 +10859,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12048,7 +10881,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12071,7 +10903,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12094,7 +10925,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12117,7 +10947,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12140,7 +10969,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12163,7 +10991,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12186,7 +11013,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12209,7 +11035,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12232,7 +11057,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12255,7 +11079,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12278,7 +11101,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12301,7 +11123,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12324,7 +11145,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12347,7 +11167,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12370,7 +11189,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12393,7 +11211,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12416,7 +11233,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12439,7 +11255,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12462,7 +11277,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12485,7 +11299,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12508,7 +11321,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11343,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12554,7 +11365,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12577,7 +11387,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12600,7 +11409,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12623,7 +11431,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12646,7 +11453,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12669,7 +11475,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12692,7 +11497,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12715,7 +11519,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12738,7 +11541,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12761,7 +11563,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12784,7 +11585,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12807,7 +11607,6 
@@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12830,7 +11629,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12853,7 +11651,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12876,7 +11673,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12899,7 +11695,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12922,7 +11717,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12945,7 +11739,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -12968,7 +11761,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12991,7 +11783,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13014,7 +11805,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13037,7 +11827,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13060,7 +11849,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13083,7 +11871,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13106,7 +11893,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13129,7 +11915,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13152,7 +11937,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13175,7 +11959,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13198,7 +11981,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13221,7 +12003,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13244,7 +12025,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13267,7 +12047,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13290,7 +12069,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13313,7 +12091,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13336,7 +12113,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13359,7 +12135,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13382,7 +12157,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13405,7 +12179,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13428,7 +12201,6 @@ entry: ret void } - define void 
@test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13451,7 +12223,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13474,7 +12245,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13497,7 +12267,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13520,7 +12289,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13543,7 +12311,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13566,7 +12333,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13589,7 
+12355,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13612,7 +12377,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13635,7 +12399,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13658,7 +12421,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13681,7 +12443,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13704,7 +12465,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13727,7 +12487,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13750,7 +12509,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12531,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -13796,7 +12553,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -13819,7 +12575,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13842,7 +12597,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -13865,7 +12619,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -13888,7 +12641,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -13911,7 +12663,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13934,7 +12685,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13957,7 +12707,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13980,7 +12729,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14003,7 +12751,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14026,7 +12773,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-14049,7 +12795,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14072,7 +12817,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14095,7 +12839,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14118,7 +12861,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14141,7 +12883,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14164,7 +12905,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14187,7 +12927,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14210,7 +12949,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14233,7 +12971,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14256,7 +12993,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14279,7 +13015,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14302,7 +13037,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14325,7 +13059,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14348,7 +13081,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14371,7 +13103,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14394,7 +13125,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14417,7 +13147,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14440,7 +13169,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14463,7 +13191,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14486,7 +13213,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14509,7 +13235,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14532,7 +13257,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14555,7 +13279,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14578,7 +13301,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14601,7 +13323,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14624,7 +13345,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14647,7 +13367,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-14670,7 +13389,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14693,7 +13411,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14716,7 +13433,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14739,7 +13455,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14762,7 +13477,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14785,7 +13499,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14808,7 +13521,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14831,7 +13543,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14854,7 +13565,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14877,7 +13587,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14900,7 +13609,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14923,7 +13631,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14946,7 +13653,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14969,7 +13675,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14992,7 +13697,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15015,7 +13719,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15038,7 +13741,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15061,7 +13763,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15084,7 +13785,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15107,7 +13807,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15130,7 +13829,6 @@ entry: ret void } - define void 
@test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15153,7 +13851,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15176,7 +13873,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15199,7 +13895,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15222,7 +13917,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15245,7 +13939,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15268,7 +13961,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15291,7 
+13983,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15314,7 +14005,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15337,7 +14027,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15360,7 +14049,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15383,7 +14071,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15406,7 +14093,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15429,7 +14115,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15452,7 +14137,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15475,7 +14159,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15498,7 +14181,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15521,7 +14203,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15544,7 +14225,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15567,7 +14247,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15590,7 +14269,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15613,7 +14291,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15636,7 +14313,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15659,7 +14335,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15682,7 +14357,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15705,7 +14379,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15728,7 +14401,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll index 
ad8097631acd3..4b86cc771d617 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.trunc.nxv2i1.nxv2i16(, , i32) - define @vtrunc_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vtrunc_nxv2i1_nxv2i16_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i1.nxv2i32(, , i32) - define @vtrunc_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vtrunc_nxv2i1_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i1.nxv2i64(, , i32) - define @vtrunc_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll index 1c687ef23bfa8..0c1ca369521f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.trunc.nxv2i7.nxv2i16(, , i32) - define @vtrunc_nxv2i7_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @vtrunc_nxv2i7_nxv2i16( %a, %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i15(, , i32) - define @vtrunc_nxv2i8_nxv2i15( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define @vtrunc_nxv2i8_nxv2i15( %a, %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i16(, , i32) - define @vtrunc_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -48,8 +42,6 @@ define 
@vtrunc_nxv2i8_nxv2i16_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i32(, , i32) - define @vtrunc_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -74,8 +66,6 @@ define @vtrunc_nxv2i8_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i64(, , i32) - define @vtrunc_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -104,8 +94,6 @@ define @vtrunc_nxv2i8_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i16.nxv2i32(, , i32) - define @vtrunc_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -126,8 +114,6 @@ define @vtrunc_nxv2i16_nxv2i32_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv2i16.nxv2i64(, , i32) - define @vtrunc_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -152,8 +138,6 @@ define @vtrunc_nxv2i16_nxv2i64_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv15i16.nxv15i64(, , i32) - define @vtrunc_nxv15i16_nxv15i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64: ; CHECK: # %bb.0: @@ -185,8 +169,6 @@ define @vtrunc_nxv15i16_nxv15i64( %a, %v } -declare @llvm.vp.trunc.nxv2i32.nxv2i64(, , i32) - define @vtrunc_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -209,8 +191,6 @@ define @vtrunc_nxv2i32_nxv2i64_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv32i7.nxv32i32(, , i32) - define @vtrunc_nxv32i7_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32: ; CHECK: # %bb.0: @@ -243,8 +223,6 @@ define @vtrunc_nxv32i7_nxv32i32( %a, %v } -declare @llvm.vp.trunc.nxv32i8.nxv32i32(, , i32) - define @vtrunc_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32: ; CHECK: # %bb.0: @@ -277,8 +255,6 @@ define @vtrunc_nxv32i8_nxv32i32( %a, %v } -declare @llvm.vp.trunc.nxv32i32.nxv32i64(, , i32) - define 
@vtrunc_nxv32i64_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll index 616dc697b2847..d82b1576d4cb5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll @@ -31,8 +31,6 @@ define @vuitofp_nxv2bf16_nxv2i1_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i1(, , i32) - define @vuitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define @vuitofp_nxv2f16_nxv2i1_unmasked( %v ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i1(, , i32) - define @vuitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define @vuitofp_nxv2f32_nxv2i1_unmasked( % ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i1(, , i32) - define @vuitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll index e1edaaadadf1d..c0c749ebf3186 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -118,8 +118,6 @@ define @vuitofp_nxv2bf16_nxv2i64_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i7(, , i32) - define @vuitofp_nxv2f16_nxv2i7( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i7: ; ZVFH: # %bb.0: @@ -144,8 +142,6 @@ define @vuitofp_nxv2f16_nxv2i7( %va, %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i8(, , i32) - define @vuitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8: ; ZVFH: # %bb.0: @@ -186,8 +182,6 @@ define @vuitofp_nxv2f16_nxv2i8_unmasked( %v ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i16(, , i32) - define @vuitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vuitofp_nxv2f16_nxv2i16: ; ZVFH: # %bb.0: @@ -224,8 +218,6 @@ define @vuitofp_nxv2f16_nxv2i16_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i32(, , i32) - define @vuitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32: ; ZVFH: # %bb.0: @@ -264,8 +256,6 @@ define @vuitofp_nxv2f16_nxv2i32_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i64(, , i32) - define @vuitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64: ; ZVFH: # %bb.0: @@ -306,8 +296,6 @@ define @vuitofp_nxv2f16_nxv2i64_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i8(, , i32) - define @vuitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -330,8 +318,6 @@ define @vuitofp_nxv2f32_nxv2i8_unmasked( % ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i16(, , i32) - define @vuitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define @vuitofp_nxv2f32_nxv2i16_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i32(, , i32) - define @vuitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -376,8 +360,6 @@ define @vuitofp_nxv2f32_nxv2i32_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i64(, , i32) - define @vuitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -400,8 +382,6 @@ define @vuitofp_nxv2f32_nxv2i64_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i8(, , i32) - define @vuitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -424,8 +404,6 @@ define @vuitofp_nxv2f64_nxv2i8_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i16(, , i32) - define @vuitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ 
define @vuitofp_nxv2f64_nxv2i16_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i32(, , i32) - define @vuitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -472,8 +448,6 @@ define @vuitofp_nxv2f64_nxv2i32_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i64(, , i32) - define @vuitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -494,8 +468,6 @@ define @vuitofp_nxv2f64_nxv2i64_unmasked( %v } -declare @llvm.vp.uitofp.nxv32f16.nxv32i32(, , i32) - define @vuitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32: ; ZVFH: # %bb.0: @@ -552,8 +524,6 @@ define @vuitofp_nxv32f16_nxv32i32( %va, ret %v } -declare @llvm.vp.uitofp.nxv32f32.nxv32i32(, , i32) - define @vuitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll index 433f5d2717e48..4fca25d0178ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll @@ -37,11 +37,6 @@ bb: ret %tmp4 } -declare @llvm.vp.sext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) - define @vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16( %x, %y, %m, i32 signext %evl) { ; CHECK-LABEL: vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll index 44742b71f3dcc..94d685e993ae0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define 
@intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16: 
; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll index 578e558aba5ab..05a3e5eac4e44 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll @@ -8,12 +8,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs -early-live-intervals | FileCheck %s -declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( - , - , - , 
- iXLen); - define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -30,14 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -77,14 +57,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,14 +90,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -149,12 +107,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -218,14 +156,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -243,12 +173,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -265,14 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -291,12 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -313,14 +223,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -338,12 +240,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -360,14 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -385,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -407,14 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define 
@intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -501,14 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -596,14 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -621,12 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -643,14 +455,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -668,12 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -690,14 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -716,12 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -738,14 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -763,12 +539,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -785,14 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # 
%entry @@ -810,12 +572,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -832,14 +588,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -857,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -879,14 +621,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -904,12 +638,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -926,14 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -951,12 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # 
%entry @@ -973,14 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -998,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1020,14 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1045,12 +737,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1067,14 +753,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1092,12 +770,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1114,14 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1139,12 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1161,14 +819,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1186,12 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1208,14 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1233,12 +869,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1255,14 +885,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1280,12 +902,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( %0, 
i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1302,14 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1327,12 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1349,14 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1374,12 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1396,14 +984,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll index a4060cb026b70..0c17e46a6db8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( - , - , - 
, - iXLen); - define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( - , - , - 
, - iXLen); - define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - 
iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ 
-1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll index ab9a038236f80..e19a212b37ac7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( - , - , - , - 
iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry 
@@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i32.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll index 02bc8d2731153..a869b5d8117b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll index 58f4e8262b3d1..8b5aa1217cce4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmacc.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwmacc.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen); 
- define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 
@@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define 
@intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmacc.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll index 486a5b09b677c..84d8587e64eca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll @@ -4,12 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.zext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll index 108ec3d49f36f..b8123735dcf5d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccsu.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, 
iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwmaccsu.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, 
iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll index 125270be4fc85..f9ea353a37bc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.zext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll index a308695d315b6..efe60d18b26fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmaccu.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32( 
- , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define 
@intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a 
} -declare @llvm.riscv.vwmaccu.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll index 18c69b9d92b1b..20346c2097e35 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccus.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccus.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - 
define @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll index c1dad7f662140..687ecf5f83ba8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( 
%0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll index 5553a0dca4ca3..01e7a47f11ed6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( - 
, - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( - , - , - 
i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll index 4302562d44eab..5e4ac670e8830 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define 
@intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll index fb46f61581a9c..53b49286a869f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwredsum.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv16i8( - , - , - , - iXLen); - define 
@intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwredsum.nxv1i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll index 87d1c6113fbf7..20cc5271ffb6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwredsumu.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv4i8( - , - 
, - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # 
%entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32( - , - , - , - , - iXLen); - define 
@intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll index 87bb5fa0238ce..1853465767637 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll @@ -8,8 +8,6 @@ ; i32 -> i64 ; 
============================================================================== -declare @llvm.vp.shl.nxv2i64(, , , i32) - define @vwsll_vv_nxv2i64_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv2i64_sext: ; CHECK: # %bb.0: @@ -263,8 +261,6 @@ define @vwsll_vi_nxv2i64( %a, i32 ; ============================================================================== -declare @llvm.vp.shl.nxv4i32(, , , i32) - define @vwsll_vv_nxv4i32_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv4i32_sext: ; CHECK: # %bb.0: @@ -486,13 +482,10 @@ define @vwsll_vi_nxv4i32( %a, %z } - ; ============================================================================== ; i8 -> i16 ; ============================================================================== -declare @llvm.vp.shl.nxv8i16(, , , i32) - define @vwsll_vv_nxv8i16_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv8i16_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll index e51efd15d48ab..85b05c491ea7a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define 
@intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - 
define @intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i16.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i16_nxv1i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i16.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i16_nxv2i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i16.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i16_nxv4i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i16.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i16_nxv8i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i16.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv16i16_nxv16i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv32i16.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv32i16_nxv32i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i32.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i32_nxv1i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret 
%a } -declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i32.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i32_nxv2i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i32.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i32_nxv4i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i32.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i32_nxv8i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i32.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv16i32_nxv16i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i64.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i64_nxv1i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i64.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i64_nxv2i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i64.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i64_nxv4i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry 
@@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i64.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i64_nxv8i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll index 8b5cba68576e0..52f36b19de102 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - 
define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( - , - , 
- i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll index d5d712ab3b2d7..9fdbe2edf017e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define 
@intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define 
@intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 
+601,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry 
@@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( %0, 
i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll index ef4ac20fd1ee3..e7e6f2019013a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( - , - , 
- , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, 
%1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll index 22688b1e58002..50977a4e35b8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - 
iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( 
%0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i32.i16( - , - , - i16, - 
iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsubu.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll index 1694a7af0a0b9..9287ffa4794eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.xor.nxv8i7(, , , i32) - define @vxor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define 
@vxor_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.xor.nxv1i8(, , , i32) - define @vxor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i8: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vxor_vi_nxv1i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv2i8(, , , i32) - define @vxor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i8: ; CHECK: # %bb.0: @@ -190,8 +184,6 @@ define @vxor_vi_nxv2i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv4i8(, , , i32) - define @vxor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i8: ; CHECK: # %bb.0: @@ -276,8 +268,6 @@ define @vxor_vi_nxv4i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv8i8(, , , i32) - define @vxor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i8: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vxor_vi_nxv8i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv15i8(, , , i32) - define @vxor_vv_nxv15i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv15i8: ; CHECK: # %bb.0: @@ -448,8 +436,6 @@ define @vxor_vi_nxv15i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i8(, , , i32) - define @vxor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i8: ; CHECK: # %bb.0: @@ -534,8 +520,6 @@ define @vxor_vi_nxv16i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv32i8(, , , i32) - define @vxor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i8: ; CHECK: # %bb.0: @@ -620,8 +604,6 @@ define @vxor_vi_nxv32i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv64i8(, , , i32) - define @vxor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv64i8: ; CHECK: # %bb.0: @@ -706,8 +688,6 @@ define @vxor_vi_nxv64i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv1i16(, , , i32) - define @vxor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i16: ; CHECK: # %bb.0: @@ -804,8 +784,6 @@ 
define @vxor_vi_nxv1i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i16(, , , i32) - define @vxor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i16: ; CHECK: # %bb.0: @@ -890,8 +868,6 @@ define @vxor_vi_nxv2i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i16(, , , i32) - define @vxor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i16: ; CHECK: # %bb.0: @@ -976,8 +952,6 @@ define @vxor_vi_nxv4i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i16(, , , i32) - define @vxor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i16: ; CHECK: # %bb.0: @@ -1062,8 +1036,6 @@ define @vxor_vi_nxv8i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i16(, , , i32) - define @vxor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1148,8 +1120,6 @@ define @vxor_vi_nxv16i16_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv32i16(, , , i32) - define @vxor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1234,8 +1204,6 @@ define @vxor_vi_nxv32i16_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv1i32(, , , i32) - define @vxor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1320,8 +1288,6 @@ define @vxor_vi_nxv1i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i32(, , , i32) - define @vxor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1406,8 +1372,6 @@ define @vxor_vi_nxv2i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i32(, , , i32) - define @vxor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1492,8 +1456,6 @@ define @vxor_vi_nxv4i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i32(, , , i32) - define @vxor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i32: ; 
CHECK: # %bb.0: @@ -1578,8 +1540,6 @@ define @vxor_vi_nxv8i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i32(, , , i32) - define @vxor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1664,8 +1624,6 @@ define @vxor_vi_nxv16i32_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv1i64(, , , i32) - define @vxor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1778,8 +1736,6 @@ define @vxor_vi_nxv1i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i64(, , , i32) - define @vxor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1892,8 +1848,6 @@ define @vxor_vi_nxv2i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i64(, , , i32) - define @vxor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2006,8 +1960,6 @@ define @vxor_vi_nxv4i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i64(, , , i32) - define @vxor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor.ll b/llvm/test/CodeGen/RISCV/rvv/vxor.ll index 05fdbac438743..da78f57df69ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vxor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: 
# %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i64.nxv1i64( - , - 
, - , - iXLen); - define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, 
i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; 
RV32-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll index 34461c7a7a312..46609758391a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll @@ -5,17 +5,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - ; Test same rounding mode in one block. define @test1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: test1: @@ -417,10 +406,6 @@ for.end: ret void } -declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen immarg, iXLen immarg) -declare @llvm.riscv.vle.nxv1i8.iXLen(, ptr nocapture, iXLen) -declare void @llvm.riscv.vse.nxv1i8.iXLen(, ptr nocapture, iXLen) - ; Test loop with dominating vxrm write. Make sure there is no write in the loop. 
define void @test11(ptr nocapture %ptr_dest, ptr nocapture readonly %ptr_op1, ptr nocapture readonly %ptr_op2, iXLen %n) { ; CHECK-LABEL: test11: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll index e14236c0258c4..e5741bcdc82ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vzext_nxv2i1_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vzext_nxv2i1_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll index 34337b1af1df5..9713b617b8384 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vzext_nxv2i8_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vzext_nxv2i8_nxv2i32_unmasked( %a, i ret %v } -declare 
@llvm.vp.zext.nxv2i64.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define @vzext_nxv2i8_nxv2i64_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i16(, , i32) - define @vzext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define @vzext_nxv2i16_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i16(, , i32) - define @vzext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define @vzext_nxv2i16_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i32(, , i32) - define @vzext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define @vzext_nxv2i32_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv32i32.nxv32i8(, , i32) - define @vzext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext.ll b/llvm/test/CodeGen/RISCV/rvv/vzext.ll index 122a9daf1d1ea..34ac26f99fec3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vzext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare 
@llvm.riscv.vzext.nxv2i64.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } 
-declare @llvm.riscv.vzext.nxv2i64.nxv2i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i32.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i32.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ 
entry: ret %a } -declare @llvm.riscv.vzext.nxv2i32.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i32.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i32.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i32.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i32.nxv16i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ 
-576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i64.nxv2i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -664,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -685,13 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -708,11 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -729,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64: ; CHECK: # 
%bb.0: # %entry @@ -752,11 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i32.nxv1i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -773,13 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -796,11 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i32.nxv2i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -817,13 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i32.nxv4i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -861,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -884,11 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i32.nxv8i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -905,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vzext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -928,11 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i32.nxv16i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -949,13 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -972,11 +708,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i16.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1016,11 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i16.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1060,11 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i16.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1081,13 +788,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1104,11 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i16.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1125,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1148,11 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i16.nxv16i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1169,13 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1192,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv32i16.nxv32i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1213,13 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll index 5872a0995feba..bc10da7e7b6e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll @@ -25,7 +25,3 @@ entry: ret void } -; Function Attrs: argmemonly mustprogress nofree nounwind 
willreturn -declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1 - -attributes #1 = { argmemonly mustprogress nofree nounwind willreturn } diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll index 37899e4a80e92..9aaa945fc471c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 
+158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define 
@test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define 
@test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf8: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) 
- define @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf4: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf2: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m1: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m2: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m4: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m8: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf4: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf2: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_x_e16m1: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m4: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m8: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32mf2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m1: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m2: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m4: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m8: ; CHECK: # %bb.0: # %entry @@ 
-704,8 +598,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, 
iXLen) - define @test_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m1: ; CHECK: # %bb.0: # 
%entry @@ -1328,8 +1126,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m2: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m8: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_f_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2: ; CHECK: # %bb.0: 
# %entry @@ -1978,8 +1676,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_sf_vc_fv_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2238,4 +1896,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll index 23628a98feb7c..c30e91448e519 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m4: ; CHECK: # %bb.0: 
# %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m2( 
%vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m4( %vs2, 
%vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m8: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) - define 
@test_sf_vc_v_vv_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m2: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_xv_se_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8mf2( %vs2, i8 
zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void 
} -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - 
define void @test_sf_vc_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_iv_se_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf2( %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - 
define void @test_sf_vc_iv_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret 
%0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define 
@test_sf_vc_v_iv_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -2329,8 +1973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: ; CHECK: # %bb.0: # 
%entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m2( %vs2, %vs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, , , iXLen) - 
define void @test_sf_vc_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m4( %vs2, i16 %rs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m8( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m8( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvi_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, , iXLen, 
iXLen) - define void @test_sf_vc_fvi_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvf_se_e16mf4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16mf4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16mf2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16mf2( %vs2, half %rs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m1( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m1( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m8( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m8( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e32mf2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32mf2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m1( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m1( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, , float, iXLen) - define void 
@test_sf_vc_fvf_se_e32m4( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m4( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m8( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m8( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, , float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll index b09e9f0e3365c..775b4e6f8affb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -94,8 +82,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -107,8 +93,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -120,8 +104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -133,8 +115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -146,8 +126,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -159,8 +137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -173,8 +149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -186,8 +160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -199,8 +171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -212,8 +182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -225,8 +193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -239,8 +205,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -252,8 +216,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -265,8 +227,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -278,8 +238,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -292,8 +250,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -305,8 +261,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -318,8 +272,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -331,8 +283,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -344,8 +294,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_v_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -357,8 +305,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -370,8 +316,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -384,8 +328,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -397,8 +339,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -410,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -423,8 +361,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -436,8 +372,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -449,8 +383,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -463,8 +395,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -476,8 +406,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -489,8 +417,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -502,8 +428,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -515,8 +439,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -529,8 +451,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -542,8 +462,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2: ; CHECK: # 
%bb.0: # %entry @@ -555,8 +473,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -568,8 +484,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -582,8 +496,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -595,8 +507,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -608,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -621,8 +529,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -634,8 +540,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -647,8 +551,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m4: ; CHECK: # %bb.0: # %entry 
@@ -660,8 +562,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -674,8 +574,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -687,8 +585,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -700,8 +596,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -713,8 +607,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -726,8 +618,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -739,8 +629,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -753,8 +641,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -766,8 
+652,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -779,8 +663,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -792,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -805,8 +685,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -819,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m1: ; CHECK: # %bb.0: # %entry @@ -832,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m2: ; CHECK: # %bb.0: # %entry @@ -845,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m4: ; CHECK: # %bb.0: # %entry @@ -858,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m8: ; CHECK: # %bb.0: # %entry @@ -872,8 +742,6 @@ entry: ret 
%0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -885,8 +753,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -898,8 +764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -911,8 +775,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -924,8 +786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -937,8 +797,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -950,8 +808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -963,8 +819,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, , , i8, iXLen) - define void 
@test_sf_vc_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -976,8 +830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -989,8 +841,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1002,8 +852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1015,8 +863,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1028,8 +874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1041,8 +885,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1054,8 +896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1067,8 +907,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1080,8 +918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1093,8 +929,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1106,8 +940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1119,8 +951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1132,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1145,8 +973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1158,8 +984,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1171,8 +995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1184,8 +1006,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1197,8 +1017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1028,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1223,8 +1039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1236,8 +1050,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1249,8 +1061,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m4( %vd, %vs2, i16 
zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1262,8 +1072,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1275,8 +1083,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1288,8 +1094,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1301,8 +1105,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1314,8 +1116,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1327,8 +1127,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1340,8 +1138,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1353,8 +1149,6 @@ 
entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1366,8 +1160,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1379,8 +1171,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1392,8 +1182,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1405,8 +1193,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1418,8 +1204,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1431,8 +1215,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1444,8 +1226,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_xvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1457,8 +1237,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1470,8 +1248,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1483,8 +1259,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1496,8 +1270,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1509,8 +1281,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1292,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1535,8 +1303,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1548,8 +1314,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - 
define @test_sf_vc_v_xvv_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1561,8 +1325,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1574,8 +1336,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_ivv_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1587,8 +1347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1600,8 +1358,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1613,8 +1369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1626,8 +1380,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1639,8 +1391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1652,8 +1402,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1665,8 +1413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1678,8 +1424,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1691,8 +1435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1704,8 +1446,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1717,8 +1457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1730,8 +1468,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1743,8 +1479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1756,8 +1490,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1769,8 +1501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1782,8 +1512,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1795,8 +1523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1808,8 +1534,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1821,8 +1545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1556,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1847,8 +1567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, , , 
iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1860,8 +1578,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1873,8 +1589,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1886,8 +1600,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1899,8 +1611,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1912,8 +1622,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1633,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1938,8 +1644,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1951,8 +1655,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1964,8 +1666,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1977,8 +1677,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1990,8 +1688,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2003,8 +1699,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2016,8 +1710,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2029,8 +1721,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2042,8 +1732,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -2055,8 +1743,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2068,8 +1754,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2081,8 +1765,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2094,8 +1776,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2107,8 +1787,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2120,8 +1798,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2133,8 +1809,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2146,8 +1820,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2159,8 +1831,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2172,8 +1842,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2185,8 +1853,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2198,8 +1864,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2211,8 +1875,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2224,8 +1886,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2237,8 +1897,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2250,8 +1908,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2: ; CHECK: # 
%bb.0: # %entry @@ -2263,8 +1919,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2276,8 +1930,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2289,8 +1941,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2302,8 +1952,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2315,8 +1963,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2328,8 +1974,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2341,8 +1985,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2354,8 +1996,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2367,8 
+2007,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2380,8 +2018,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2393,8 +2029,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2406,8 +2040,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2419,8 +2051,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2432,8 +2062,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2445,8 +2073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2458,8 +2084,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2: ; 
CHECK: # %bb.0: # %entry @@ -2471,8 +2095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2484,8 +2106,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2497,8 +2117,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2510,8 +2128,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2523,8 +2139,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2536,8 +2150,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2549,8 +2161,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2562,8 +2172,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2184,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2590,8 +2196,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2603,8 +2207,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2616,8 +2218,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2629,8 +2229,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2642,8 +2240,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2655,8 +2251,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_fv_fvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2668,8 +2262,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2681,8 +2273,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2694,8 +2284,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2708,8 +2296,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2722,8 +2308,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2735,8 +2319,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2748,8 +2330,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2: ; CHECK: # %bb.0: # 
%entry @@ -2761,8 +2341,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2774,8 +2352,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2787,8 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2800,8 +2374,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2814,8 +2386,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2828,8 +2398,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2841,8 +2409,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2854,8 +2420,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2867,8 +2431,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2880,8 +2442,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2893,8 +2453,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2906,8 +2464,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2919,8 +2475,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2932,8 +2486,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2945,8 +2497,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2958,8 +2508,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2971,8 +2519,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2984,8 +2530,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2997,8 +2541,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3010,8 +2552,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3023,8 +2563,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3036,8 +2574,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3049,8 +2585,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3062,8 +2596,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3075,8 +2607,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3088,8 +2618,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3101,8 +2629,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3114,8 +2640,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3127,8 +2651,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , 
iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3140,8 +2662,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3153,8 +2673,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3166,8 +2684,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3179,8 +2695,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3192,8 +2706,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3205,8 +2717,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3218,8 +2728,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3231,8 +2739,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3244,8 +2750,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3257,8 +2761,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3270,8 +2772,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3283,8 +2783,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3296,8 +2794,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3309,8 +2805,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3322,8 +2816,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3335,8 +2827,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3348,8 +2838,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3361,8 +2849,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3374,8 +2860,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3387,8 +2871,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3400,8 +2882,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3413,8 +2893,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, 
iXLen) - define @test_sf_vc_fv_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3426,8 +2904,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3439,8 +2915,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3452,8 +2926,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3465,8 +2937,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3478,8 +2948,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3491,8 +2959,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3504,8 +2970,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void 
@test_sf_vc_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3517,8 +2981,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3530,8 +2992,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3543,8 +3003,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3556,8 +3014,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3569,8 +3025,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3582,8 +3036,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3595,8 +3047,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define 
@test_sf_vc_fv_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3608,8 +3058,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3621,8 +3069,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3634,8 +3080,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3647,8 +3091,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3660,8 +3102,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3673,8 +3113,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3686,4 +3124,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float %rs1, iXLen) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll index 29b9238b8e9c0..b51047a53ed7a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16mf2( 
%vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ 
-197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_v_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 
@@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: ; CHECK: 
# %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_xvw_se_e8mf8( %vd, %vs2, i8 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: ; 
CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, , 
, i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: ; CHECK: # %bb.0: 
# %entry @@ -1068,8 +906,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, , , 
i32, iXLen) - define @test_sf_vc_v_xvw_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: ; CHECK: 
# %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: ; CHECK: # %bb.0: # %entry @@ 
-1653,8 +1401,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void 
@test_sf_vc_fwvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_w_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 
+1808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define 
@test_sf_vc_fw_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: ; CHECK: 
# %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2693,4 +2281,3 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll index 03f92c7229c18..b8dddbb531370 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqa.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare 
@llvm.riscv.th.vmaqa.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqa.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll index b17035f377c61..7f945cf7f35bb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqasu.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv8i32.i8( - , 
- i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll index 809b81fa38435..0ae95e0994033 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqau.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqau.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv2i32.i8( - , - 
i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll index cd6e749b656fb..f44b7597fe75f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll @@ -4,13 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqaus.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll index 1261d824968d6..d0ad1acd4fba4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -5,9 +5,6 @@ ; Make sure we don't select a 0 vl to X0 in the custom isel handlers we use ; for these intrinsics. -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2_mask_nxv16i16(ptr %base, %mask) { ; CHECK-LABEL: test_vlseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -21,9 +18,6 @@ entry: ret %2 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, %mask) { ; CHECK-LABEL: test_vlsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -37,8 +31,6 @@ entry: %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) ret %2 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) define @test_vloxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: @@ -55,9 +47,6 @@ entry: ret %2 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -73,9 +62,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i16(ptr %base, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -110,9 +96,6 @@ entry: ret %2 } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base) { ; CHECK-LABEL: test_vsseg2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -135,9 +118,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset) { ; CHECK-LABEL: test_vssseg2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,9 +140,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -185,9 +162,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll index 1d6d07aa67337..27c7518c4f6c4 100644 --- a/llvm/test/CodeGen/RISCV/sadd_sat.ll +++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.sadd.sat.i4(i4, i4) -declare i8 @llvm.sadd.sat.i8(i8, i8) -declare i16 @llvm.sadd.sat.i16(i16, i16) -declare i32 @llvm.sadd.sat.i32(i32, i32) -declare i64 @llvm.sadd.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32-LABEL: func: ; RV32: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll index 9200a77915c56..108a214535c3e 100644 --- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.sadd.sat.i4(i4, i4) -declare i8 @llvm.sadd.sat.i8(i8, i8) -declare i16 @llvm.sadd.sat.i16(i16, i16) -declare i32 @llvm.sadd.sat.i32(i32, i32) -declare i64 @llvm.sadd.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32-LABEL: func32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/saverestore.ll b/llvm/test/CodeGen/RISCV/saverestore.ll index f753f817b0ab6..85db6a01e3f66 100644 --- a/llvm/test/CodeGen/RISCV/saverestore.ll +++ b/llvm/test/CodeGen/RISCV/saverestore.ll @@ -180,9 +180,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) 
nounwind { ; RV32I-LABEL: varargs: ; RV32I-NOT: call t0, __riscv_save @@ -249,8 +246,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) nounwind { diff --git a/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir b/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir index f8d6d4b13846e..f247c5f38037b 100644 --- a/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir +++ b/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir @@ -10,9 +10,6 @@ ret void, !dbg !13 } - declare void @llvm.dbg.value(metadata, metadata, metadata) #0 - - attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } !llvm.dbg.cu = !{!0} !llvm.debugify = !{!2, !3} diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll index 9f326280885b5..7e7ba9fc6803a 100644 --- a/llvm/test/CodeGen/RISCV/sextw-removal.ll +++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll @@ -256,8 +256,6 @@ bb7: ; preds = %bb2 ret void } -declare i32 @llvm.ctpop.i32(i32) - define void @test6(i32 signext %arg, i32 signext %arg1) nounwind { ; CHECK-LABEL: test6: ; CHECK: # %bb.0: # %bb @@ -410,8 +408,6 @@ bb7: ; preds = %bb2 ret void } -declare i64 @llvm.ctpop.i64(i64) - define void @test8(i32 signext %arg, i32 signext %arg1) nounwind { ; CHECK-LABEL: test8: ; CHECK: # %bb.0: # %bb @@ -715,7 +711,6 @@ bb7: ; preds = %bb2 ret i32 %i8 } - ; int test14(int a, int n) { ; for (int i = 1; i < n; ++i) { ; if (a > 1000) @@ -1323,7 +1318,6 @@ bb2: ; preds = %bb2, %bb bb7: ; preds = %bb2 ret void } -declare i32 @llvm.riscv.sha256sig0(i32) ; The type promotion of %7 forms a sext_inreg, but %7 and %6 are combined to ; form a sh2add. This leaves behind a sext.w that isn't needed. @@ -1499,8 +1493,6 @@ bb7: ; preds = %bb2 ret void } -declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) - ; Test that we can look through brev8 in hasAllNBitUsers. 
define signext i32 @test21(i64 %arg1, i64 %arg2, i64 %arg3) { ; RV64I-LABEL: test21: diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll index 1ca23d72b107b..f5ec7da7b70fe 100644 --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -7,9 +7,6 @@ ; Basic shift support is tested as part of ALU.ll. This file ensures that ; shifts which may not be supported natively are lowered properly. -declare i64 @llvm.fshr.i64(i64, i64, i64) -declare i128 @llvm.fshr.i128(i128, i128, i128) - define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: lshr64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll index 1bfeeb92e06dd..dc625e25bd6f4 100644 --- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll +++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll @@ -2047,7 +2047,6 @@ define signext i32 @abs_i32(i32 signext %x) { %a = call i32 @llvm.abs.i32(i32 %x, i1 false) ret i32 %a } -declare i32 @llvm.abs.i32(i32, i1) define i64 @abs_i64(i64 %x) { ; NOSFB-LABEL: abs_i64: @@ -2088,7 +2087,6 @@ define i64 @abs_i64(i64 %x) { %a = call i64 @llvm.abs.i64(i64 %x, i1 false) ret i64 %a } -declare i64 @llvm.abs.i64(i64, i1) define i64 @select_andn(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) { ; NOSFB-LABEL: select_andn: diff --git a/llvm/test/CodeGen/RISCV/simplify-condbr.ll b/llvm/test/CodeGen/RISCV/simplify-condbr.ll index 6dabd7d93cbc1..4aadd034ff0f7 100644 --- a/llvm/test/CodeGen/RISCV/simplify-condbr.ll +++ b/llvm/test/CodeGen/RISCV/simplify-condbr.ll @@ -2,12 +2,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0 - -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) -declare void @llvm.assume(i1 noundef) #0 - 
declare fastcc i1 @S_reginclass() declare fastcc ptr @Perl_av_store(i64) @@ -175,5 +169,3 @@ sw.bb85: ; preds = %if.end48 br label %common.ret } -attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } -attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) } diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll index ba4d170c719fc..0ee97d6660451 100644 --- a/llvm/test/CodeGen/RISCV/ssub_sat.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.ssub.sat.i4(i4, i4) -declare i8 @llvm.ssub.sat.i8(i8, i8) -declare i16 @llvm.ssub.sat.i16(i16, i16) -declare i32 @llvm.ssub.sat.i32(i32, i32) -declare i64 @llvm.ssub.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32-LABEL: func: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll index 437c1e2a2e489..f74cbd442ab83 100644 --- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.ssub.sat.i4(i4, i4) -declare i8 @llvm.ssub.sat.i8(i8, i8) -declare i16 @llvm.ssub.sat.i16(i16, i16) -declare i32 @llvm.ssub.sat.i32(i32, i32) -declare i64 @llvm.ssub.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32-LABEL: func32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll index 366b37ac5d472..6756fea8a1f85 100644 --- a/llvm/test/CodeGen/RISCV/tail-calls.ll +++ 
b/llvm/test/CodeGen/RISCV/tail-calls.ll @@ -26,7 +26,6 @@ entry: ; Perform tail call optimization for external symbol. @dest = global [2 x i8] zeroinitializer -declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1) define void @caller_extern(ptr %src) optsize { ; CHECK-LABEL: caller_extern: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/thread-pointer.ll b/llvm/test/CodeGen/RISCV/thread-pointer.ll index 4465b7ecc910c..d4f318a0cf2a1 100644 --- a/llvm/test/CodeGen/RISCV/thread-pointer.ll +++ b/llvm/test/CodeGen/RISCV/thread-pointer.ll @@ -2,8 +2,6 @@ ; RUN: llc < %s -mtriple=riscv64 | FileCheck %s ; RUN: llc < %s -mtriple=riscv32 | FileCheck %s -declare ptr @llvm.thread.pointer() - define ptr @thread_pointer() nounwind { ; CHECK-LABEL: thread_pointer: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll index ee591a1784635..4e0c4ab750592 100644 --- a/llvm/test/CodeGen/RISCV/uadd_sat.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.uadd.sat.i4(i4, i4) -declare i8 @llvm.uadd.sat.i8(i8, i8) -declare i16 @llvm.uadd.sat.i16(i16, i16) -declare i32 @llvm.uadd.sat.i32(i32, i32) -declare i64 @llvm.uadd.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32I-LABEL: func: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll index da29d26b7147f..a6afef4286dea 100644 --- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.uadd.sat.i4(i4, i4) -declare i8 
@llvm.uadd.sat.i8(i8, i8) -declare i16 @llvm.uadd.sat.i16(i16, i16) -declare i32 @llvm.uadd.sat.i32(i32, i32) -declare i64 @llvm.uadd.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32I-LABEL: func32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll index 34d9aaf39bf72..938e6550387f5 100644 --- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll +++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll @@ -118,8 +118,6 @@ start: } ; Function Attrs: nounwind readnone speculatable -declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1 attributes #0 = { nounwind readnone } -attributes #1 = { nounwind readnone speculatable } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll index aab5626576427..33056682dcc79 100644 --- a/llvm/test/CodeGen/RISCV/usub_sat.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.usub.sat.i4(i4, i4) -declare i8 @llvm.usub.sat.i8(i8, i8) -declare i16 @llvm.usub.sat.i16(i16, i16) -declare i32 @llvm.usub.sat.i32(i32, i32) -declare i64 @llvm.usub.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32I-LABEL: func: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll index 3285349ea068a..ef6bc022ddc9f 100644 --- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.usub.sat.i4(i4, i4) 
-declare i8 @llvm.usub.sat.i8(i8, i8) -declare i16 @llvm.usub.sat.i16(i16, i16) -declare i32 @llvm.usub.sat.i32(i32, i32) -declare i64 @llvm.usub.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32I-LABEL: func32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll index 91999444fa766..7312d005962ba 100644 --- a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll +++ b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -target-abi ilp32e -frame-pointer=all -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=ILP32E-WITHFP %s -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) declare void @abort() define i32 @caller(i32 %a) { diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll index 3dd99f3d49d2d..fc391e9fb049e 100644 --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -40,9 +40,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -1871,8 +1868,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define i32 @va4_va_copy(i32 %argno, ...) 
nounwind { ; ILP32-ILP32F-FPELIM-LABEL: va4_va_copy: ; ILP32-ILP32F-FPELIM: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll b/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll index c8c364208da90..b569854bb47df 100644 --- a/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll +++ b/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv64 -mattr=+m,+c,+v < %s | FileCheck --check-prefix=RV64V %s -declare void @llvm.va_copy.p0(ptr, ptr) -declare void @llvm.va_end.p0(ptr) - define dso_local void @_Z3fooPKcz(ptr noundef %0, ...) "frame-pointer"="all" { ; RV64V-LABEL: _Z3fooPKcz: ; RV64V: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/vlenb.ll b/llvm/test/CodeGen/RISCV/vlenb.ll index 280df6545fd06..cc2850617fcad 100644 --- a/llvm/test/CodeGen/RISCV/vlenb.ll +++ b/llvm/test/CodeGen/RISCV/vlenb.ll @@ -92,8 +92,6 @@ loop: br label %loop } - -declare i32 @llvm.read_register.i32(metadata) nounwind declare void @unknown() declare void @use(i32) diff --git a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll index fd725e555a326..69958d48e63c8 100644 --- a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple riscv64 -mattr +v -filetype asm -o - %s | FileCheck %s -declare i8 @llvm.vscale.i8() -declare @llvm.stepvector.nxv8i8() - define @f() #0 { ; CHECK-LABEL: f: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll index 93b68b0a95b48..62f08d7831dda 100644 --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -1953,7 +1953,6 @@ entry: ret i1 %obit } - ; ; Check the use of the overflow bit in combination with 
a select instruction. ; @@ -3809,7 +3808,6 @@ entry: ret i1 %ret } - ; ; Check the use of the overflow bit in combination with a branch instruction. ; @@ -5586,15 +5584,3 @@ IfNoOverflow: ret i64 %val } -declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone diff --git a/llvm/test/CodeGen/RISCV/xcvalu.ll b/llvm/test/CodeGen/RISCV/xcvalu.ll index 91e5153ee7a5b..5cc7bfbc7fba6 100644 --- a/llvm/test/CodeGen/RISCV/xcvalu.ll +++ b/llvm/test/CodeGen/RISCV/xcvalu.ll @@ -2,12 +2,6 @@ ; RUN: llc -O0 -mtriple=riscv32 -mattr=+m -mattr=+xcvalu -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.abs.i32(i32, i1) -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) -declare i32 @llvm.umin.i32(i32, i32) -declare i32 @llvm.umax.i32(i32, i32) - define i32 @abs(i32 %a) { ; CHECK-LABEL: abs: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define i32 @extbz(i8 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.clip(i32, i32) - define i32 @test.cv.alu.clip.case.a(i32 %a) { ; CHECK-LABEL: test.cv.alu.clip.case.a: ; CHECK: # %bb.0: @@ -132,8 +124,6 @@ define i32 @test.cv.alu.clip.case.b(i32 %a) { ret i32 %1 } -declare 
i32 @llvm.riscv.cv.alu.clipu(i32, i32) - define i32 @test.cv.alu.clipu.case.a(i32 %a) { ; CHECK-LABEL: test.cv.alu.clipu.case.a: ; CHECK: # %bb.0: @@ -153,8 +143,6 @@ define i32 @test.cv.alu.clipu.case.b(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addN(i32, i32, i32) - define i32 @test.cv.alu.addN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.addN.case.a: ; CHECK: # %bb.0: @@ -174,8 +162,6 @@ define i32 @test.cv.alu.addN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.adduN(i32, i32, i32) - define i32 @test.cv.alu.adduN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.adduN.case.a: ; CHECK: # %bb.0: @@ -195,8 +181,6 @@ define i32 @test.cv.alu.adduN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addRN(i32, i32, i32) - define i32 @test.cv.alu.addRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.addRN.case.a: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define i32 @test.cv.alu.addRN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.adduRN(i32, i32, i32) - define i32 @test.cv.alu.adduRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.adduRN.case.a: ; CHECK: # %bb.0: @@ -237,8 +219,6 @@ define i32 @test.cv.alu.adduRN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subN(i32, i32, i32) - define i32 @test.cv.alu.subN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subN.case.a: ; CHECK: # %bb.0: @@ -258,8 +238,6 @@ define i32 @test.cv.alu.subN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subuN(i32, i32, i32) - define i32 @test.cv.alu.subuN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subuN.case.a: ; CHECK: # %bb.0: @@ -279,8 +257,6 @@ define i32 @test.cv.alu.subuN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subRN(i32, i32, i32) - define i32 @test.cv.alu.subRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subRN.case.a: ; CHECK: # %bb.0: @@ -300,8 +276,6 @@ define i32 @test.cv.alu.subRN.case.b(i32 %a, 
i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subuRN(i32, i32, i32) - define i32 @test.cv.alu.subuRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subuRN.case.a: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xcvbitmanip.ll b/llvm/test/CodeGen/RISCV/xcvbitmanip.ll index b2cebabb7df8b..7e63efac9b62f 100644 --- a/llvm/test/CodeGen/RISCV/xcvbitmanip.ll +++ b/llvm/test/CodeGen/RISCV/xcvbitmanip.ll @@ -4,8 +4,6 @@ ; RUN: llc -O3 -mtriple=riscv32 -mattr=+xcvbitmanip -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-O3 -declare i32 @llvm.riscv.cv.bitmanip.extract(i32, i32) - define i32 @test.cv.extractr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.extractr: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define i32 @test.cv.extract1023(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.extractu(i32, i32) - define i32 @test.cv.extractur(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.extractur: ; CHECK: # %bb.0: @@ -53,8 +49,6 @@ define i32 @test.cv.extractu(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.insert(i32, i32, i32) - define i32 @test.cv.insert(i32 %c, i32 %a) { ; CHECK-LABEL: test.cv.insert: ; CHECK: # %bb.0: @@ -73,8 +67,6 @@ define i32 @test.cv.insertr(i32 %c, i32 %b, i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bclr(i32, i32) - define i32 @test.cv.bclrr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.bclrr: ; CHECK: # %bb.0: @@ -93,8 +85,6 @@ define i32 @test.cv.bclr(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bset(i32, i32) - define i32 @test.cv.bsetr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.bsetr: ; CHECK: # %bb.0: @@ -113,8 +103,6 @@ define i32 @test.cv.bset(i32 %a) { ret i32 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @test.cv.ff1(i32 %a) { ; CHECK-LABEL: test.cv.ff1: ; CHECK: # %bb.0: @@ -124,8 +112,6 @@ define i32 @test.cv.ff1(i32 %a) { ret i32 %1 } -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @test.cv.fl1(i32 %a) { ; CHECK-LABEL: test.cv.fl1: ; CHECK: # %bb.0: 
@@ -135,8 +121,6 @@ define i32 @test.cv.fl1(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.clb(i32) - define i32 @test.cv.clb(i32 %a) { ; CHECK-LABEL: test.cv.clb: ; CHECK: # %bb.0: @@ -146,8 +130,6 @@ define i32 @test.cv.clb(i32 %a) { ret i32 %1 } -declare i32 @llvm.ctpop(i32) - define i32 @test.cv.cnt(i32 %a) { ; CHECK-LABEL: test.cv.cnt: ; CHECK: # %bb.0: @@ -157,8 +139,6 @@ define i32 @test.cv.cnt(i32 %a) { ret i32 %1 } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define i32 @test.llvm.fshl.imm(i32 %a) { ; CHECK-LABEL: test.llvm.fshl.imm: ; CHECK: # %bb.0: @@ -187,8 +167,6 @@ define i32 @test.llvm.fshl.reg(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @test.llvm.fshr.imm(i32 %a) { ; CHECK-LABEL: test.llvm.fshr.imm: ; CHECK: # %bb.0: @@ -208,8 +186,6 @@ define i32 @test.llvm.fshr.reg(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bitrev(i32, i32, i32) - define i32 @test.cv.bitrev(i32 %a) { ; CHECK-LABEL: test.cv.bitrev: ; CHECK: # %bb.0: @@ -219,8 +195,6 @@ define i32 @test.cv.bitrev(i32 %a) { ret i32 %1 } -declare i32 @llvm.bitreverse(i32) - define i32 @test.llvm.bitrev(i32 %a) { ; CHECK-LABEL: test.llvm.bitrev: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xcvelw.ll b/llvm/test/CodeGen/RISCV/xcvelw.ll new file mode 100644 index 0000000000000..4ff8a5b38494f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/xcvelw.ll @@ -0,0 +1,27 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -O0 -mtriple=riscv32 -mattr=+xcvelw -verify-machineinstrs < %s \ +; RUN: | FileCheck %s + +declare i32 @llvm.riscv.cv.elw.elw(i8*) + +define i32 @test.cv.elw.elw(i8* %a) { +; CHECK-LABEL: test.cv.elw.elw: +; CHECK: # %bb.0: +; CHECK-NEXT: cv.elw a0, 0(a0) +; CHECK-NEXT: ret + %1 = call i32 @llvm.riscv.cv.elw.elw(i8* %a) + ret i32 %1 +} + +define i32 @test.cv.elw.elw2(i8* %a, i32 %b) { +; CHECK-LABEL: test.cv.elw.elw2: +; CHECK: # %bb.0: +; CHECK-NEXT: add a0, a1, a0 
+; CHECK-NEXT: cv.elw a0, 7(a0) +; CHECK-NEXT: ret + %c = add i32 %b, 4 + %d = add i32 %c, 3 + %e = getelementptr i8, i8* %a, i32 %d + %1 = call i32 @llvm.riscv.cv.elw.elw(i8* %e) + ret i32 %1 +} \ No newline at end of file diff --git a/llvm/test/CodeGen/RISCV/xcvmac.ll b/llvm/test/CodeGen/RISCV/xcvmac.ll index 68efdf7210f7f..40cde9aba6734 100644 --- a/llvm/test/CodeGen/RISCV/xcvmac.ll +++ b/llvm/test/CodeGen/RISCV/xcvmac.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+xcvmac -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.riscv.cv.mac.mac(i32, i32, i32) - define i32 @test.mac(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.mac: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define i32 @test.mac(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.msu(i32, i32, i32) - define i32 @test.msu(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.msu: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define i32 @test.msu(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.muluN(i32, i32, i32) - define i32 @test.muluN(i32 %a, i32 %b) { ; CHECK-LABEL: test.muluN: ; CHECK: # %bb.0: @@ -37,8 +31,6 @@ define i32 @test.muluN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhuN(i32, i32, i32) - define i32 @test.mulhhuN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhuN: ; CHECK: # %bb.0: @@ -48,8 +40,6 @@ define i32 @test.mulhhuN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulsN(i32, i32, i32) - define i32 @test.mulsN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulsN: ; CHECK: # %bb.0: @@ -59,8 +49,6 @@ define i32 @test.mulsN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhsN(i32, i32, i32) - define i32 @test.mulhhsN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhsN: ; CHECK: # %bb.0: @@ -70,8 +58,6 @@ define i32 @test.mulhhsN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.muluRN(i32, i32, i32) - define i32 @test.muluRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.muluRN: ; CHECK: # 
%bb.0: @@ -81,8 +67,6 @@ define i32 @test.muluRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhuRN(i32, i32, i32) - define i32 @test.mulhhuRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhuRN: ; CHECK: # %bb.0: @@ -92,8 +76,6 @@ define i32 @test.mulhhuRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulsRN(i32, i32, i32) - define i32 @test.mulsRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulsRN: ; CHECK: # %bb.0: @@ -103,8 +85,6 @@ define i32 @test.mulsRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhsRN(i32, i32, i32) - define i32 @test.mulhhsRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhsRN: ; CHECK: # %bb.0: @@ -114,8 +94,6 @@ define i32 @test.mulhhsRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macuN(i32, i32, i32, i32) - define i32 @test.macuN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macuN: ; CHECK: # %bb.0: @@ -126,8 +104,6 @@ define i32 @test.macuN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhuN(i32, i32, i32, i32) - define i32 @test.machhuN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhuN: ; CHECK: # %bb.0: @@ -138,8 +114,6 @@ define i32 @test.machhuN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macsN(i32, i32, i32, i32) - define i32 @test.macsN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macsN: ; CHECK: # %bb.0: @@ -150,8 +124,6 @@ define i32 @test.macsN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhsN(i32, i32, i32, i32) - define i32 @test.machhsN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhsN: ; CHECK: # %bb.0: @@ -162,8 +134,6 @@ define i32 @test.machhsN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macuRN(i32, i32, i32, i32) - define i32 @test.macuRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macuRN: ; CHECK: # %bb.0: @@ -174,8 +144,6 @@ define i32 @test.macuRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhuRN(i32, i32, 
i32, i32) - define i32 @test.machhuRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhuRN: ; CHECK: # %bb.0: @@ -186,8 +154,6 @@ define i32 @test.machhuRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macsRN(i32, i32, i32, i32) - define i32 @test.macsRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macsRN: ; CHECK: # %bb.0: @@ -198,8 +164,6 @@ define i32 @test.macsRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhsRN(i32, i32, i32, i32) - define i32 @test.machhsRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhsRN: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll index a7e24cecb4f26..92c5d2892cbdc 100644 --- a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll @@ -988,9 +988,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) 
{ ; RV32IXQCCMP-LABEL: varargs: ; RV32IXQCCMP: # %bb.0: @@ -1437,8 +1434,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) { @@ -3732,7 +3727,6 @@ define void @callee_no_irq() { } declare void @bar(ptr, ptr) -declare ptr @llvm.frameaddress.p0(i32 immarg) define i32 @use_fp(i32 %x) { ; RV32IXQCCMP-LABEL: use_fp: diff --git a/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll b/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll index 8568d88bceab6..5b5d3d856d878 100644 --- a/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll +++ b/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll @@ -6,21 +6,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb,experimental-xqcibm -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBBXQCIBM -declare i8 @llvm.cttz.i8(i8, i1) -declare i16 @llvm.cttz.i16(i16, i1) -declare i32 @llvm.cttz.i32(i32, i1) -declare i64 @llvm.cttz.i64(i64, i1) - -declare i8 @llvm.ctlz.i8(i8, i1) -declare i16 @llvm.ctlz.i16(i16, i1) -declare i32 @llvm.ctlz.i32(i32, i1) -declare i64 @llvm.ctlz.i64(i64, i1) - -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll index b48e039dd30a4..2fad19a653f1f 100644 --- a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll +++ b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll @@ -34,8 +34,6 @@ entry: ret void } -declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) - define void @test2(ptr nocapture %p) nounwind { ; RV32I-LABEL: test2: ; RV32I: # %bb.0: # %entry @@ -142,7 +140,6 @@ entry: ret void } - define ptr @test3(ptr %p) nounwind { ; RV32I-LABEL: test3: ; RV32I: # %bb.0: # %entry @@ -215,10 +212,6 
@@ entry: ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) - -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) - define void @test4b() nounwind { ; RV32I-LABEL: test4b: ; RV32I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll index 3efa9e58e65d3..eb1848965a9ba 100644 --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll @@ -24,8 +24,6 @@ ; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefix=RV64IZDINXZHINX %s -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) - define half @sqrt_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: sqrt_f16: ; RV32IZFH: # %bb.0: @@ -60,8 +58,6 @@ define half @sqrt_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.floor.f16(half, metadata) - define half @floor_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: floor_f16: ; RV32IZFH: # %bb.0: @@ -132,8 +128,6 @@ define half @floor_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.ceil.f16(half, metadata) - define half @ceil_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: ceil_f16: ; RV32IZFH: # %bb.0: @@ -204,8 +198,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: trunc_f16: ; RV32IZFH: # %bb.0: @@ -276,8 +268,6 @@ define half @trunc_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) - define half @rint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: rint_f16: ; RV32IZFH: # %bb.0: @@ -348,8 +338,6 @@ define half @rint_f16(half %a) nounwind strictfp { ret half %1 } -declare half 
@llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) - define half @nearbyint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: nearbyint_f16: ; RV32IZFH: # %bb.0: @@ -420,8 +408,6 @@ define half @nearbyint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.round.f16(half, metadata) - define half @round_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: round_f16: ; RV32IZFH: # %bb.0: @@ -492,8 +478,6 @@ define half @round_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) - define half @roundeven_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: roundeven_f16: ; RV32IZFH: # %bb.0: @@ -564,8 +548,6 @@ define half @roundeven_f16(half %a) nounwind strictfp { ret half %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half, metadata, metadata) - define iXLen @lrint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: lrint_f16: ; RV32IZFH: # %bb.0: @@ -600,8 +582,6 @@ define iXLen @lrint_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f16(half, metadata) - define iXLen @lround_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: lround_f16: ; RV32IZFH: # %bb.0: @@ -636,8 +616,6 @@ define iXLen @lround_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata) - define i64 @llrint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: llrint_f16: ; RV32IZFH: # %bb.0: @@ -687,8 +665,6 @@ define i64 @llrint_f16(half %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata) - define i64 @llround_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: llround_f16: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll index ba2ea57a00822..b71027ee278d9 100644 --- 
a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll @@ -26,8 +26,6 @@ ; These intrinsics require half to be a legal type. -declare iXLen @llvm.lrint.iXLen.f16(half) - define iXLen @lrint_f16(half %a) nounwind { ; RV32IZFH-LABEL: lrint_f16: ; RV32IZFH: # %bb.0: @@ -72,9 +70,6 @@ define iXLen @lrint_f16(half %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f16(half) -declare i64 @llvm.lround.i64.f16(half) - define iXLen @lround_f16(half %a) nounwind { ; RV32IZFH-LABEL: lround_f16: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll index 214ea46d3130d..0529819a4f4e2 100644 --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll @@ -24,8 +24,6 @@ ; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefix=RV64IZDINXZHINXMIN %s -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) - define half @sqrt_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: sqrt_f16: ; RV32IZFHMIN: # %bb.0: @@ -72,8 +70,6 @@ define half @sqrt_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.floor.f16(half, metadata) - define half @floor_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: floor_f16: ; RV32IZFHMIN: # %bb.0: @@ -144,8 +140,6 @@ define half @floor_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.ceil.f16(half, metadata) - define half @ceil_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: ceil_f16: ; RV32IZFHMIN: # %bb.0: @@ -216,8 +210,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: trunc_f16: ; RV32IZFHMIN: # %bb.0: @@ -288,8 +280,6 
@@ define half @trunc_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) - define half @rint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: rint_f16: ; RV32IZFHMIN: # %bb.0: @@ -360,8 +350,6 @@ define half @rint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) - define half @nearbyint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: nearbyint_f16: ; RV32IZFHMIN: # %bb.0: @@ -432,8 +420,6 @@ define half @nearbyint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.round.f16(half, metadata) - define half @round_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: round_f16: ; RV32IZFHMIN: # %bb.0: @@ -504,8 +490,6 @@ define half @round_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) - define half @roundeven_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: roundeven_f16: ; RV32IZFHMIN: # %bb.0: @@ -576,8 +560,6 @@ define half @roundeven_f16(half %a) nounwind strictfp { ret half %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half, metadata, metadata) - define iXLen @lrint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: lrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -618,8 +600,6 @@ define iXLen @lrint_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f16(half, metadata) - define iXLen @lround_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: lround_f16: ; RV32IZFHMIN: # %bb.0: @@ -660,8 +640,6 @@ define iXLen @lround_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata) - define i64 @llrint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: llrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -714,8 +692,6 @@ define i64 @llrint_f16(half 
%a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata) - define i64 @llround_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: llround_f16: ; RV32IZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll index 0a494878926d1..a87f2dda9cb42 100644 --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll @@ -26,8 +26,6 @@ ; These intrinsics require half to be a legal type. -declare iXLen @llvm.lrint.iXLen.f16(half) - define iXLen @lrint_f16(half %a) nounwind { ; RV32IZFHMIN-LABEL: lrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -80,8 +78,6 @@ define iXLen @lrint_f16(half %a) nounwind { ret iXLen %1 } -declare iXLen @llvm.lround.iXLen.f16(half) - define iXLen @lround_f16(half %a) nounwind { ; RV32IZFHMIN-LABEL: lround_f16: ; RV32IZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/SPARC/fp128-abi.ll b/llvm/test/CodeGen/SPARC/fp128-abi.ll new file mode 100644 index 0000000000000..341e05d80e71e --- /dev/null +++ b/llvm/test/CodeGen/SPARC/fp128-abi.ll @@ -0,0 +1,89 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=sparc | FileCheck %s --check-prefix=SPARC32 +; RUN: llc < %s -mtriple=sparcv9 | FileCheck %s --check-prefix=SPARC64 + +define fp128 @f128_direct(fp128 %num) nounwind { +; SPARC32-LABEL: f128_direct: +; SPARC32: ! 
%bb.0: +; SPARC32-NEXT: save %sp, -144, %sp +; SPARC32-NEXT: ldd [%i0], %f0 +; SPARC32-NEXT: ldd [%i0+8], %f4 +; SPARC32-NEXT: ld [%fp+64], %i0 +; SPARC32-NEXT: add %fp, -16, %i1 +; SPARC32-NEXT: st %i1, [%sp+64] +; SPARC32-NEXT: std %f4, [%fp+-40] +; SPARC32-NEXT: std %f0, [%fp+-48] +; SPARC32-NEXT: std %f4, [%fp+-24] +; SPARC32-NEXT: add %fp, -32, %o0 +; SPARC32-NEXT: add %fp, -48, %o1 +; SPARC32-NEXT: call f128_callee +; SPARC32-NEXT: std %f0, [%fp+-32] +; SPARC32-NEXT: unimp 16 +; SPARC32-NEXT: ldd [%fp+-8], %f0 +; SPARC32-NEXT: ldd [%fp+-16], %f4 +; SPARC32-NEXT: std %f0, [%i0+8] +; SPARC32-NEXT: std %f4, [%i0] +; SPARC32-NEXT: ret +; SPARC32-NEXT: restore +; +; SPARC64-LABEL: f128_direct: +; SPARC64: ! %bb.0: +; SPARC64-NEXT: save %sp, -176, %sp +; SPARC64-NEXT: fmovd %f0, %f4 +; SPARC64-NEXT: fmovd %f2, %f6 +; SPARC64-NEXT: call f128_callee +; SPARC64-NEXT: nop +; SPARC64-NEXT: ret +; SPARC64-NEXT: restore + %ret = call fp128 @f128_callee(fp128 %num, fp128 %num) + ret fp128 %ret +} +declare fp128 @f128_callee(fp128 %a, fp128 %b) + +define fp128 @f128_direct_spill(i32 %o0, i32 %o1, i32 %o2, i32 %o3, i32 %o4, i32 %o5, i32 %ss0, fp128 %num) nounwind { +; SPARC32-LABEL: f128_direct_spill: +; SPARC32: ! 
%bb.0: +; SPARC32-NEXT: save %sp, -136, %sp +; SPARC32-NEXT: ld [%fp+96], %g2 +; SPARC32-NEXT: ldd [%g2], %f0 +; SPARC32-NEXT: ldd [%g2+8], %f4 +; SPARC32-NEXT: ld [%fp+64], %l0 +; SPARC32-NEXT: mov %i5, %o5 +; SPARC32-NEXT: mov %i4, %o4 +; SPARC32-NEXT: mov %i3, %o3 +; SPARC32-NEXT: mov %i2, %o2 +; SPARC32-NEXT: mov %i1, %o1 +; SPARC32-NEXT: mov %i0, %o0 +; SPARC32-NEXT: add %fp, -32, %i0 +; SPARC32-NEXT: st %i0, [%sp+92] +; SPARC32-NEXT: add %fp, -16, %i0 +; SPARC32-NEXT: st %i0, [%sp+64] +; SPARC32-NEXT: std %f4, [%fp+-24] +; SPARC32-NEXT: call f128_callee_spill +; SPARC32-NEXT: std %f0, [%fp+-32] +; SPARC32-NEXT: unimp 16 +; SPARC32-NEXT: ldd [%fp+-8], %f0 +; SPARC32-NEXT: ldd [%fp+-16], %f4 +; SPARC32-NEXT: std %f0, [%l0+8] +; SPARC32-NEXT: std %f4, [%l0] +; SPARC32-NEXT: ret +; SPARC32-NEXT: restore +; +; SPARC64-LABEL: f128_direct_spill: +; SPARC64: ! %bb.0: +; SPARC64-NEXT: save %sp, -192, %sp +; SPARC64-NEXT: fmovd %f16, %f12 +; SPARC64-NEXT: fmovd %f18, %f14 +; SPARC64-NEXT: mov %i5, %o5 +; SPARC64-NEXT: mov %i4, %o4 +; SPARC64-NEXT: mov %i3, %o3 +; SPARC64-NEXT: mov %i2, %o2 +; SPARC64-NEXT: mov %i1, %o1 +; SPARC64-NEXT: call f128_callee_spill +; SPARC64-NEXT: mov %i0, %o0 +; SPARC64-NEXT: ret +; SPARC64-NEXT: restore + %ret = call fp128 @f128_callee_spill(i32 %o0, i32 %o1, i32 %o2, i32 %o3, i32 %o4, i32 %o5, fp128 %num) + ret fp128 %ret +} +declare fp128 @f128_callee_spill(i32 %o0, i32 %o1, i32 %o2, i32 %o3, i32 %o4, i32 %o5, fp128 %a) diff --git a/llvm/test/CodeGen/SPARC/fp16-promote.ll b/llvm/test/CodeGen/SPARC/fp16-promote.ll index 64873b744de50..4e46fd073923e 100644 --- a/llvm/test/CodeGen/SPARC/fp16-promote.ll +++ b/llvm/test/CodeGen/SPARC/fp16-promote.ll @@ -268,19 +268,20 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind { define void @test_fptrunc_fp128(ptr %dp, ptr %p) nounwind { ; V8-OPT-LABEL: test_fptrunc_fp128: ; V8-OPT: ! 
%bb.0: -; V8-OPT-NEXT: save %sp, -104, %sp +; V8-OPT-NEXT: save %sp, -112, %sp ; V8-OPT-NEXT: ldd [%i0], %f0 ; V8-OPT-NEXT: ldd [%i0+8], %f4 -; V8-OPT-NEXT: std %f4, [%sp+100] +; V8-OPT-NEXT: std %f4, [%fp+-8] +; V8-OPT-NEXT: add %fp, -16, %o0 ; V8-OPT-NEXT: call __trunctfhf2 -; V8-OPT-NEXT: std %f0, [%sp+92] +; V8-OPT-NEXT: std %f0, [%fp+-16] ; V8-OPT-NEXT: sth %o0, [%i1] ; V8-OPT-NEXT: ret ; V8-OPT-NEXT: restore ; ; V8-UNOPT-LABEL: test_fptrunc_fp128: ; V8-UNOPT: ! %bb.0: -; V8-UNOPT-NEXT: save %sp, -104, %sp +; V8-UNOPT-NEXT: save %sp, -112, %sp ; V8-UNOPT-NEXT: ldd [%i0], %f4 ; V8-UNOPT-NEXT: ! implicit-def: $q0 ; V8-UNOPT-NEXT: fmovs %f4, %f0 @@ -290,22 +291,24 @@ define void @test_fptrunc_fp128(ptr %dp, ptr %p) nounwind { ; V8-UNOPT-NEXT: fmovs %f5, %f3 ; V8-UNOPT-NEXT: fmovs %f2, %f4 ; V8-UNOPT-NEXT: fmovs %f3, %f5 -; V8-UNOPT-NEXT: std %f4, [%sp+100] +; V8-UNOPT-NEXT: std %f4, [%fp+-8] ; V8-UNOPT-NEXT: ! kill: def $d0 killed $d0 killed $q0 +; V8-UNOPT-NEXT: std %f0, [%fp+-16] ; V8-UNOPT-NEXT: call __trunctfhf2 -; V8-UNOPT-NEXT: std %f0, [%sp+92] +; V8-UNOPT-NEXT: add %fp, -16, %o0 ; V8-UNOPT-NEXT: sth %o0, [%i1] ; V8-UNOPT-NEXT: ret ; V8-UNOPT-NEXT: restore ; ; V9-LABEL: test_fptrunc_fp128: ; V9: ! 
%bb.0: -; V9-NEXT: save %sp, -104, %sp +; V9-NEXT: save %sp, -112, %sp ; V9-NEXT: ldd [%i0], %f0 ; V9-NEXT: ldd [%i0+8], %f4 -; V9-NEXT: std %f4, [%sp+100] +; V9-NEXT: std %f4, [%fp+-8] +; V9-NEXT: add %fp, -16, %o0 ; V9-NEXT: call __trunctfhf2 -; V9-NEXT: std %f0, [%sp+92] +; V9-NEXT: std %f0, [%fp+-16] ; V9-NEXT: sth %o0, [%i1] ; V9-NEXT: ret ; V9-NEXT: restore diff --git a/llvm/test/CodeGen/SPARC/llvm.sincos.ll b/llvm/test/CodeGen/SPARC/llvm.sincos.ll index 8d0d50f67e3f5..ea5de64607042 100644 --- a/llvm/test/CodeGen/SPARC/llvm.sincos.ll +++ b/llvm/test/CodeGen/SPARC/llvm.sincos.ll @@ -943,42 +943,38 @@ define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 { define void @test_sincos_f128(ptr sret({ fp128, fp128 }) %ret, ptr %in) #0 { ; SPARC32-LABEL: test_sincos_f128: ; SPARC32: ! %bb.0: -; SPARC32-NEXT: save %sp, -168, %sp +; SPARC32-NEXT: save %sp, -184, %sp ; SPARC32-NEXT: ld [%fp+64], %i1 ; SPARC32-NEXT: ldd [%i0], %f0 -; SPARC32-NEXT: std %f0, [%fp+-64] -; SPARC32-NEXT: std %f2, [%fp+-56] ! 16-byte Folded Spill +; SPARC32-NEXT: std %f0, [%fp+-88] +; SPARC32-NEXT: std %f2, [%fp+-80] ! 16-byte Folded Spill ; SPARC32-NEXT: ldd [%i0+8], %f4 -; SPARC32-NEXT: std %f4, [%fp+-48] ! 8-byte Folded Spill -; SPARC32-NEXT: add %fp, -32, %i0 +; SPARC32-NEXT: std %f4, [%fp+-72] ! 8-byte Folded Spill +; SPARC32-NEXT: add %fp, -48, %i0 ; SPARC32-NEXT: st %i0, [%sp+64] -; SPARC32-NEXT: std %f4, [%sp+100] +; SPARC32-NEXT: std %f4, [%fp+-56] +; SPARC32-NEXT: add %fp, -64, %o0 ; SPARC32-NEXT: call sinl -; SPARC32-NEXT: std %f0, [%sp+92] +; SPARC32-NEXT: std %f0, [%fp+-64] ; SPARC32-NEXT: unimp 16 ; SPARC32-NEXT: add %fp, -16, %i0 ; SPARC32-NEXT: st %i0, [%sp+64] -; SPARC32-NEXT: ldd [%fp+-48], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+100] -; SPARC32-NEXT: ldd [%fp+-64], %f0 -; SPARC32-NEXT: ldd [%fp+-56], %f2 ! 
16-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+92] -; SPARC32-NEXT: ldd [%fp+-32], %f0 -; SPARC32-NEXT: std %f0, [%fp+-48] -; SPARC32-NEXT: std %f2, [%fp+-40] ! 16-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-24], %f0 +; SPARC32-NEXT: ldd [%fp+-72], %f0 ! 8-byte Folded Reload +; SPARC32-NEXT: std %f0, [%fp+-24] +; SPARC32-NEXT: add %fp, -32, %o0 +; SPARC32-NEXT: ldd [%fp+-88], %f0 +; SPARC32-NEXT: ldd [%fp+-80], %f2 ! 16-byte Folded Reload ; SPARC32-NEXT: call cosl -; SPARC32-NEXT: std %f0, [%fp+-64] +; SPARC32-NEXT: std %f0, [%fp+-32] ; SPARC32-NEXT: unimp 16 ; SPARC32-NEXT: ldd [%fp+-8], %f0 ; SPARC32-NEXT: ldd [%fp+-16], %f4 +; SPARC32-NEXT: ldd [%fp+-40], %f2 +; SPARC32-NEXT: ldd [%fp+-48], %f8 ; SPARC32-NEXT: std %f0, [%i1+24] ; SPARC32-NEXT: std %f4, [%i1+16] -; SPARC32-NEXT: ldd [%fp+-64], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i1+8] -; SPARC32-NEXT: ldd [%fp+-48], %f0 -; SPARC32-NEXT: ldd [%fp+-40], %f2 ! 16-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i1] +; SPARC32-NEXT: std %f2, [%i1+8] +; SPARC32-NEXT: std %f8, [%i1] ; SPARC32-NEXT: jmp %i7+12 ; SPARC32-NEXT: restore %g0, %i1, %o0 ; @@ -1006,15 +1002,16 @@ define void @test_sincos_f128(ptr sret({ fp128, fp128 }) %ret, ptr %in) #0 { ; ; GNU32-LABEL: test_sincos_f128: ; GNU32: ! 
%bb.0: -; GNU32-NEXT: save %sp, -136, %sp +; GNU32-NEXT: save %sp, -144, %sp ; GNU32-NEXT: ld [%fp+64], %i1 ; GNU32-NEXT: ldd [%i0], %f0 ; GNU32-NEXT: ldd [%i0+8], %f4 -; GNU32-NEXT: std %f4, [%sp+100] -; GNU32-NEXT: add %fp, -16, %o0 -; GNU32-NEXT: add %fp, -32, %o1 +; GNU32-NEXT: std %f4, [%fp+-40] +; GNU32-NEXT: add %fp, -48, %o0 +; GNU32-NEXT: add %fp, -16, %o1 +; GNU32-NEXT: add %fp, -32, %o2 ; GNU32-NEXT: call sincosl -; GNU32-NEXT: std %f0, [%sp+92] +; GNU32-NEXT: std %f0, [%fp+-48] ; GNU32-NEXT: ldd [%fp+-24], %f0 ; GNU32-NEXT: ldd [%fp+-32], %f4 ; GNU32-NEXT: ldd [%fp+-8], %f2 @@ -1057,85 +1054,71 @@ define void @test_sincos_f128(ptr sret({ fp128, fp128 }) %ret, ptr %in) #0 { define void @test_sincos_v2f128(ptr sret({ <2 x fp128>, <2 x fp128> }) %ret, ptr %in) #0 { ; SPARC32-LABEL: test_sincos_v2f128: ; SPARC32: ! %bb.0: -; SPARC32-NEXT: save %sp, -248, %sp +; SPARC32-NEXT: save %sp, -272, %sp ; SPARC32-NEXT: mov %i0, %i1 ; SPARC32-NEXT: ld [%fp+64], %i0 ; SPARC32-NEXT: ldd [%i1], %f0 -; SPARC32-NEXT: std %f0, [%fp+-80] -; SPARC32-NEXT: std %f2, [%fp+-72] ! 16-byte Folded Spill +; SPARC32-NEXT: std %f0, [%fp+-144] +; SPARC32-NEXT: std %f2, [%fp+-136] ! 16-byte Folded Spill ; SPARC32-NEXT: ldd [%i1+8], %f0 -; SPARC32-NEXT: std %f0, [%fp+-88] ! 8-byte Folded Spill +; SPARC32-NEXT: std %f0, [%fp+-152] ! 8-byte Folded Spill ; SPARC32-NEXT: ldd [%i1+16], %f0 -; SPARC32-NEXT: std %f0, [%fp+-120] -; SPARC32-NEXT: std %f2, [%fp+-112] ! 16-byte Folded Spill +; SPARC32-NEXT: std %f0, [%fp+-176] +; SPARC32-NEXT: std %f2, [%fp+-168] ! 16-byte Folded Spill ; SPARC32-NEXT: ldd [%i1+24], %f4 -; SPARC32-NEXT: std %f4, [%fp+-104] ! 8-byte Folded Spill -; SPARC32-NEXT: add %fp, -64, %i1 +; SPARC32-NEXT: std %f4, [%fp+-160] ! 
8-byte Folded Spill +; SPARC32-NEXT: add %fp, -112, %i1 ; SPARC32-NEXT: st %i1, [%sp+64] -; SPARC32-NEXT: std %f4, [%sp+100] +; SPARC32-NEXT: std %f4, [%fp+-120] +; SPARC32-NEXT: add %fp, -128, %o0 ; SPARC32-NEXT: call sinl -; SPARC32-NEXT: std %f0, [%sp+92] +; SPARC32-NEXT: std %f0, [%fp+-128] ; SPARC32-NEXT: unimp 16 ; SPARC32-NEXT: add %fp, -16, %i1 ; SPARC32-NEXT: st %i1, [%sp+64] -; SPARC32-NEXT: ldd [%fp+-88], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+100] -; SPARC32-NEXT: ldd [%fp+-80], %f0 -; SPARC32-NEXT: ldd [%fp+-72], %f2 ! 16-byte Folded Reload +; SPARC32-NEXT: ldd [%fp+-152], %f0 ! 8-byte Folded Reload +; SPARC32-NEXT: std %f0, [%fp+-24] +; SPARC32-NEXT: add %fp, -32, %o0 +; SPARC32-NEXT: ldd [%fp+-144], %f0 +; SPARC32-NEXT: ldd [%fp+-136], %f2 ! 16-byte Folded Reload ; SPARC32-NEXT: call cosl -; SPARC32-NEXT: std %f0, [%sp+92] +; SPARC32-NEXT: std %f0, [%fp+-32] ; SPARC32-NEXT: unimp 16 -; SPARC32-NEXT: add %fp, -32, %i1 +; SPARC32-NEXT: add %fp, -48, %i1 ; SPARC32-NEXT: st %i1, [%sp+64] -; SPARC32-NEXT: ldd [%fp+-88], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+100] -; SPARC32-NEXT: ldd [%fp+-80], %f0 -; SPARC32-NEXT: ldd [%fp+-72], %f2 ! 16-byte Folded Reload +; SPARC32-NEXT: ldd [%fp+-152], %f0 ! 8-byte Folded Reload +; SPARC32-NEXT: std %f0, [%fp+-56] +; SPARC32-NEXT: add %fp, -64, %o0 +; SPARC32-NEXT: ldd [%fp+-144], %f0 +; SPARC32-NEXT: ldd [%fp+-136], %f2 ! 16-byte Folded Reload ; SPARC32-NEXT: call sinl -; SPARC32-NEXT: std %f0, [%sp+92] +; SPARC32-NEXT: std %f0, [%fp+-64] ; SPARC32-NEXT: unimp 16 -; SPARC32-NEXT: add %fp, -48, %i1 +; SPARC32-NEXT: add %fp, -80, %i1 ; SPARC32-NEXT: st %i1, [%sp+64] -; SPARC32-NEXT: ldd [%fp+-104], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+100] -; SPARC32-NEXT: ldd [%fp+-120], %f0 -; SPARC32-NEXT: ldd [%fp+-112], %f2 ! 
16-byte Folded Reload -; SPARC32-NEXT: std %f0, [%sp+92] -; SPARC32-NEXT: ldd [%fp+-32], %f0 -; SPARC32-NEXT: std %f0, [%fp+-80] -; SPARC32-NEXT: std %f2, [%fp+-72] ! 16-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-24], %f0 -; SPARC32-NEXT: std %f0, [%fp+-88] ! 8-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-64], %f0 -; SPARC32-NEXT: std %f0, [%fp+-104] -; SPARC32-NEXT: std %f2, [%fp+-96] ! 16-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-56], %f0 -; SPARC32-NEXT: std %f0, [%fp+-120] ! 8-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-16], %f0 -; SPARC32-NEXT: std %f0, [%fp+-136] -; SPARC32-NEXT: std %f2, [%fp+-128] ! 16-byte Folded Spill -; SPARC32-NEXT: ldd [%fp+-8], %f0 +; SPARC32-NEXT: ldd [%fp+-160], %f0 ! 8-byte Folded Reload +; SPARC32-NEXT: std %f0, [%fp+-88] +; SPARC32-NEXT: add %fp, -96, %o0 +; SPARC32-NEXT: ldd [%fp+-176], %f0 +; SPARC32-NEXT: ldd [%fp+-168], %f2 ! 16-byte Folded Reload ; SPARC32-NEXT: call cosl -; SPARC32-NEXT: std %f0, [%fp+-144] +; SPARC32-NEXT: std %f0, [%fp+-96] ; SPARC32-NEXT: unimp 16 -; SPARC32-NEXT: ldd [%fp+-40], %f0 -; SPARC32-NEXT: ldd [%fp+-48], %f4 -; SPARC32-NEXT: std %f0, [%i0+56] -; SPARC32-NEXT: std %f4, [%i0+48] -; SPARC32-NEXT: ldd [%fp+-144], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i0+40] -; SPARC32-NEXT: ldd [%fp+-136], %f0 -; SPARC32-NEXT: ldd [%fp+-128], %f2 ! 16-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i0+32] -; SPARC32-NEXT: ldd [%fp+-120], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i0+24] -; SPARC32-NEXT: ldd [%fp+-104], %f0 -; SPARC32-NEXT: ldd [%fp+-96], %f2 ! 16-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i0+16] -; SPARC32-NEXT: ldd [%fp+-88], %f0 ! 8-byte Folded Reload -; SPARC32-NEXT: std %f0, [%i0+8] -; SPARC32-NEXT: ldd [%fp+-80], %f0 -; SPARC32-NEXT: ldd [%fp+-72], %f2 ! 
16-byte Folded Reload +; SPARC32-NEXT: ldd [%fp+-48], %f0 +; SPARC32-NEXT: ldd [%fp+-40], %f8 +; SPARC32-NEXT: ldd [%fp+-112], %f4 +; SPARC32-NEXT: ldd [%fp+-104], %f10 +; SPARC32-NEXT: ldd [%fp+-72], %f12 +; SPARC32-NEXT: ldd [%fp+-80], %f16 +; SPARC32-NEXT: ldd [%fp+-8], %f14 +; SPARC32-NEXT: ldd [%fp+-16], %f20 +; SPARC32-NEXT: std %f12, [%i0+56] +; SPARC32-NEXT: std %f16, [%i0+48] +; SPARC32-NEXT: std %f14, [%i0+40] +; SPARC32-NEXT: std %f20, [%i0+32] +; SPARC32-NEXT: std %f10, [%i0+24] +; SPARC32-NEXT: std %f4, [%i0+16] +; SPARC32-NEXT: std %f8, [%i0+8] ; SPARC32-NEXT: std %f0, [%i0] ; SPARC32-NEXT: jmp %i7+12 ; SPARC32-NEXT: restore @@ -1186,37 +1169,39 @@ define void @test_sincos_v2f128(ptr sret({ <2 x fp128>, <2 x fp128> }) %ret, ptr ; ; GNU32-LABEL: test_sincos_v2f128: ; GNU32: ! %bb.0: -; GNU32-NEXT: save %sp, -192, %sp +; GNU32-NEXT: save %sp, -216, %sp ; GNU32-NEXT: mov %i0, %i1 ; GNU32-NEXT: ld [%fp+64], %i0 ; GNU32-NEXT: ldd [%i1+16], %f0 -; GNU32-NEXT: std %f0, [%fp+-80] -; GNU32-NEXT: std %f2, [%fp+-72] ! 16-byte Folded Spill +; GNU32-NEXT: std %f0, [%fp+-112] +; GNU32-NEXT: std %f2, [%fp+-104] ! 16-byte Folded Spill ; GNU32-NEXT: ldd [%i1+24], %f0 -; GNU32-NEXT: std %f0, [%fp+-88] ! 8-byte Folded Spill +; GNU32-NEXT: std %f0, [%fp+-120] ! 8-byte Folded Spill ; GNU32-NEXT: ldd [%i1], %f0 ; GNU32-NEXT: ldd [%i1+8], %f4 -; GNU32-NEXT: std %f4, [%sp+100] -; GNU32-NEXT: add %fp, -48, %o0 +; GNU32-NEXT: std %f4, [%fp+-88] +; GNU32-NEXT: add %fp, -96, %o0 ; GNU32-NEXT: add %fp, -64, %o1 +; GNU32-NEXT: add %fp, -80, %o2 ; GNU32-NEXT: call sincosl -; GNU32-NEXT: std %f0, [%sp+92] -; GNU32-NEXT: ldd [%fp+-88], %f0 ! 8-byte Folded Reload -; GNU32-NEXT: std %f0, [%sp+100] -; GNU32-NEXT: add %fp, -16, %o0 -; GNU32-NEXT: add %fp, -32, %o1 -; GNU32-NEXT: ldd [%fp+-80], %f0 -; GNU32-NEXT: ldd [%fp+-72], %f2 ! 16-byte Folded Reload +; GNU32-NEXT: std %f0, [%fp+-96] +; GNU32-NEXT: ldd [%fp+-120], %f0 ! 
8-byte Folded Reload +; GNU32-NEXT: std %f0, [%fp+-40] +; GNU32-NEXT: add %fp, -48, %o0 +; GNU32-NEXT: add %fp, -16, %o1 +; GNU32-NEXT: add %fp, -32, %o2 +; GNU32-NEXT: ldd [%fp+-112], %f0 +; GNU32-NEXT: ldd [%fp+-104], %f2 ! 16-byte Folded Reload ; GNU32-NEXT: call sincosl -; GNU32-NEXT: std %f0, [%sp+92] -; GNU32-NEXT: ldd [%fp+-48], %f0 -; GNU32-NEXT: ldd [%fp+-40], %f8 +; GNU32-NEXT: std %f0, [%fp+-48] +; GNU32-NEXT: ldd [%fp+-64], %f0 +; GNU32-NEXT: ldd [%fp+-56], %f8 ; GNU32-NEXT: ldd [%fp+-16], %f4 ; GNU32-NEXT: ldd [%fp+-8], %f10 ; GNU32-NEXT: ldd [%fp+-24], %f12 ; GNU32-NEXT: ldd [%fp+-32], %f16 -; GNU32-NEXT: ldd [%fp+-56], %f14 -; GNU32-NEXT: ldd [%fp+-64], %f20 +; GNU32-NEXT: ldd [%fp+-72], %f14 +; GNU32-NEXT: ldd [%fp+-80], %f20 ; GNU32-NEXT: std %f12, [%i0+56] ; GNU32-NEXT: std %f16, [%i0+48] ; GNU32-NEXT: std %f14, [%i0+40] diff --git a/llvm/test/CodeGen/SPIRV/OpVariable_order.ll b/llvm/test/CodeGen/SPIRV/OpVariable_order.ll index 1e94be0886307..a43a4d66d04bb 100644 --- a/llvm/test/CodeGen/SPIRV/OpVariable_order.ll +++ b/llvm/test/CodeGen/SPIRV/OpVariable_order.ll @@ -13,7 +13,9 @@ define void @main() { entry: %0 = alloca <2 x i32>, align 4 + store <2 x i32> zeroinitializer, ptr %0, align 4 %1 = getelementptr <2 x i32>, ptr %0, i32 0, i32 0 %2 = alloca float, align 4 + store float 0.0, ptr %2, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll b/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll index 9e91854de1172..b0bad1819a25d 100644 --- a/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll +++ b/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll @@ -29,9 +29,12 @@ %Struct7 = type [2 x %Struct] %Nested = type { %Struct7 } +@G = global %Struct zeroinitializer + define spir_kernel void @foo(ptr addrspace(4) %arg1, ptr addrspace(4) %arg2) { entry: %var = alloca %Struct + store %Struct zeroinitializer, ptr %var %r1 = call %Struct @_Z29__spirv_SpecConstantComposite_1(float 1.0) store 
%Struct %r1, ptr addrspace(4) %arg1 %r2 = call %Struct7 @_Z29__spirv_SpecConstantComposite_2(%Struct %r1, %Struct %r1) diff --git a/llvm/test/CodeGen/SPIRV/basic_float_types.ll b/llvm/test/CodeGen/SPIRV/basic_float_types.ll index a0ba97e1d1f14..6cdc67bbf24ee 100644 --- a/llvm/test/CodeGen/SPIRV/basic_float_types.ll +++ b/llvm/test/CodeGen/SPIRV/basic_float_types.ll @@ -2,6 +2,9 @@ ; RUN: llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_bfloat16 %s -o - | FileCheck %s ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown --spirv-ext=+SPV_KHR_bfloat16 %s -o - -filetype=obj | spirv-val %} +; TODO: Open bug bfloat16 cannot be stored to. +; XFAIL: * + define void @main() { entry: @@ -49,50 +52,66 @@ entry: ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_half]] Function %half_Val = alloca half, align 2 + store half 0.0, ptr %half_Val, align 2 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_bfloat]] Function %bfloat_Val = alloca bfloat, align 2 + store bfloat 0.0, ptr %bfloat_Val, align 2 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_float]] Function %float_Val = alloca float, align 4 + store float 0.0, ptr %float_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_double]] Function %double_Val = alloca double, align 8 + store double 0.0, ptr %double_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2half]] Function %half2_Val = alloca <2 x half>, align 4 + store <2 x half> zeroinitializer, ptr %half2_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3half]] Function %half3_Val = alloca <3 x half>, align 8 + store <3 x half> zeroinitializer, ptr %half3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4half]] Function %half4_Val = alloca <4 x half>, align 8 + store <4 x half> zeroinitializer, ptr %half4_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2bfloat]] Function %bfloat2_Val = alloca <2 x bfloat>, align 4 + store <2 x bfloat> zeroinitializer, ptr %bfloat2_Val, align 4 ; CHECK: %[[#]] = OpVariable
%[[#ptr_Function_v3bfloat]] Function %bfloat3_Val = alloca <3 x bfloat>, align 8 + store <3 x bfloat> zeroinitializer, ptr %bfloat3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4bfloat]] Function %bfloat4_Val = alloca <4 x bfloat>, align 8 + store <4 x bfloat> zeroinitializer, ptr %bfloat4_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2float]] Function %float2_Val = alloca <2 x float>, align 8 + store <2 x float> zeroinitializer, ptr %float2_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3float]] Function %float3_Val = alloca <3 x float>, align 16 + store <3 x float> zeroinitializer, ptr %float3_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4float]] Function %float4_Val = alloca <4 x float>, align 16 + store <4 x float> zeroinitializer, ptr %float4_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2double]] Function %double2_Val = alloca <2 x double>, align 16 + store <2 x double> zeroinitializer, ptr %double2_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3double]] Function %double3_Val = alloca <3 x double>, align 32 + store <3 x double> zeroinitializer, ptr %double3_Val, align 32 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4double]] Function %double4_Val = alloca <4 x double>, align 32 + store <4 x double> zeroinitializer, ptr %double4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/basic_int_types.ll b/llvm/test/CodeGen/SPIRV/basic_int_types.ll index 5aa7aaf6fbd01..1ed241eed4019 100644 --- a/llvm/test/CodeGen/SPIRV/basic_int_types.ll +++ b/llvm/test/CodeGen/SPIRV/basic_int_types.ll @@ -37,39 +37,51 @@ entry: ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_short]] Function %int16_t_Val = alloca i16, align 2 + store i16 0, ptr %int16_t_Val, align 2 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_int]] Function %int_Val = alloca i32, align 4 + store i32 0, ptr %int_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_long]] Function %int64_t_Val = alloca i64, align 
8 + store i64 0, ptr %int64_t_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2short]] Function %int16_t2_Val = alloca <2 x i16>, align 4 + store <2 x i16> zeroinitializer, ptr %int16_t2_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3short]] Function %int16_t3_Val = alloca <3 x i16>, align 8 + store <3 x i16> zeroinitializer, ptr %int16_t3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4short]] Function %int16_t4_Val = alloca <4 x i16>, align 8 + store <4 x i16> zeroinitializer, ptr %int16_t4_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2int]] Function %int2_Val = alloca <2 x i32>, align 8 + store <2 x i32> zeroinitializer, ptr %int2_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3int]] Function %int3_Val = alloca <3 x i32>, align 16 + store <3 x i32> zeroinitializer, ptr %int3_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4int]] Function %int4_Val = alloca <4 x i32>, align 16 + store <4 x i32> zeroinitializer, ptr %int4_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2long]] Function %int64_t2_Val = alloca <2 x i64>, align 16 + store <2 x i64> zeroinitializer, ptr %int64_t2_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3long]] Function %int64_t3_Val = alloca <3 x i64>, align 32 + store <3 x i64> zeroinitializer, ptr %int64_t3_Val, align 32 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4long]] Function %int64_t4_Val = alloca <4 x i64>, align 32 + store <4 x i64> zeroinitializer, ptr %int64_t4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll index 56b5f48715533..f3c8f9967211a 100644 --- a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll +++ b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll @@ -6,39 +6,51 @@ define void @main() { entry: ; CHECK: %int16_t_Val = OpVariable %_ptr_Function_ushort Function %int16_t_Val = alloca i16, align 2 + store i16 0, i16* 
%int16_t_Val, align 2 ; CHECK: %int_Val = OpVariable %_ptr_Function_uint Function %int_Val = alloca i32, align 4 + store i32 0, i32* %int_Val, align 4 ; CHECK: %int64_t_Val = OpVariable %_ptr_Function_ulong Function %int64_t_Val = alloca i64, align 8 + store i64 0, i64* %int64_t_Val, align 8 ; CHECK: %int16_t2_Val = OpVariable %_ptr_Function_v2ushort Function %int16_t2_Val = alloca <2 x i16>, align 4 + store <2 x i16> zeroinitializer, <2 x i16>* %int16_t2_Val, align 4 ; CHECK: %int16_t3_Val = OpVariable %_ptr_Function_v3ushort Function %int16_t3_Val = alloca <3 x i16>, align 8 + store <3 x i16> zeroinitializer, <3 x i16>* %int16_t3_Val, align 8 ; CHECK: %int16_t4_Val = OpVariable %_ptr_Function_v4ushort Function %int16_t4_Val = alloca <4 x i16>, align 8 + store <4 x i16> zeroinitializer, <4 x i16>* %int16_t4_Val, align 8 ; CHECK: %int2_Val = OpVariable %_ptr_Function_v2uint Function %int2_Val = alloca <2 x i32>, align 8 + store <2 x i32> zeroinitializer, <2 x i32>* %int2_Val, align 8 ; CHECK: %int3_Val = OpVariable %_ptr_Function_v3uint Function %int3_Val = alloca <3 x i32>, align 16 + store <3 x i32> zeroinitializer, <3 x i32>* %int3_Val, align 16 ; CHECK: %int4_Val = OpVariable %_ptr_Function_v4uint Function %int4_Val = alloca <4 x i32>, align 16 + store <4 x i32> zeroinitializer, <4 x i32>* %int4_Val, align 16 ; CHECK: %int64_t2_Val = OpVariable %_ptr_Function_v2ulong Function %int64_t2_Val = alloca <2 x i64>, align 16 + store <2 x i64> zeroinitializer, <2 x i64>* %int64_t2_Val, align 16 ; CHECK: %int64_t3_Val = OpVariable %_ptr_Function_v3ulong Function %int64_t3_Val = alloca <3 x i64>, align 32 + store <3 x i64> zeroinitializer, <3 x i64>* %int64_t3_Val, align 32 ; CHECK: %int64_t4_Val = OpVariable %_ptr_Function_v4ulong Function %int64_t4_Val = alloca <4 x i64>, align 32 + store <4 x i64> zeroinitializer, <4 x i64>* %int64_t4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll 
b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll index 39a755e736081..bca90f4ebd151 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll @@ -33,6 +33,28 @@ target triple = "spirv32-unknown-unknown" ; CHECK: [[SubgroupId]] = OpVariable [[I32PTR]] Input ; CHECK: [[SubgroupLocalInvocationId]] = OpVariable [[I32PTR]] Input +@G_spv_num_workgroups_0 = global i32 0 +@G_spv_num_workgroups_1 = global i32 0 +@G_spv_num_workgroups_2 = global i32 0 +@G_spv_workgroup_size_0 = global i32 0 +@G_spv_workgroup_size_1 = global i32 0 +@G_spv_workgroup_size_2 = global i32 0 +@G_spv_group_id_0 = global i32 0 +@G_spv_group_id_1 = global i32 0 +@G_spv_group_id_2 = global i32 0 +@G_spv_thread_id_in_group_0 = global i32 0 +@G_spv_thread_id_in_group_1 = global i32 0 +@G_spv_thread_id_in_group_2 = global i32 0 +@G_spv_thread_id_0 = global i32 0 +@G_spv_thread_id_1 = global i32 0 +@G_spv_thread_id_2 = global i32 0 +@G_spv_global_size_0 = global i32 0 +@G_spv_global_size_1 = global i32 0 +@G_spv_global_size_2 = global i32 0 +@G_spv_global_offset_0 = global i32 0 +@G_spv_global_offset_1 = global i32 0 +@G_spv_global_offset_2 = global i32 0 + ; Function Attrs: convergent noinline norecurse nounwind optnone define spir_func void @test_id_and_range() { entry: @@ -44,66 +66,87 @@ entry: ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.num.workgroups = call i32 @llvm.spv.num.workgroups.i32(i32 0) + store i32 %spv.num.workgroups, i32* @G_spv_num_workgroups_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.num.workgroups1 = call i32 @llvm.spv.num.workgroups.i32(i32 1) + store i32 %spv.num.workgroups1, i32* @G_spv_num_workgroups_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.num.workgroups2 = call i32 @llvm.spv.num.workgroups.i32(i32 2) + store i32 
%spv.num.workgroups2, i32* @G_spv_num_workgroups_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.workgroup.size = call i32 @llvm.spv.workgroup.size.i32(i32 0) + store i32 %spv.workgroup.size, i32* @G_spv_workgroup_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.workgroup.size3 = call i32 @llvm.spv.workgroup.size.i32(i32 1) + store i32 %spv.workgroup.size3, i32* @G_spv_workgroup_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.workgroup.size4 = call i32 @llvm.spv.workgroup.size.i32(i32 2) + store i32 %spv.workgroup.size4, i32* @G_spv_workgroup_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.group.id = call i32 @llvm.spv.group.id.i32(i32 0) + store i32 %spv.group.id, i32* @G_spv_group_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.group.id5 = call i32 @llvm.spv.group.id.i32(i32 1) + store i32 %spv.group.id5, i32* @G_spv_group_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.group.id6 = call i32 @llvm.spv.group.id.i32(i32 2) + store i32 %spv.group.id6, i32* @G_spv_group_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.thread.id.in.group = call i32 @llvm.spv.thread.id.in.group.i32(i32 0) + store i32 %spv.thread.id.in.group, i32* @G_spv_thread_id_in_group_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.thread.id.in.group7 = call i32 @llvm.spv.thread.id.in.group.i32(i32 1) + store i32 %spv.thread.id.in.group7, i32* @G_spv_thread_id_in_group_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract 
[[I32]] [[LD]] 2 %spv.thread.id.in.group8 = call i32 @llvm.spv.thread.id.in.group.i32(i32 2) + store i32 %spv.thread.id.in.group8, i32* @G_spv_thread_id_in_group_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.thread.id = call i32 @llvm.spv.thread.id.i32(i32 0) + store i32 %spv.thread.id, i32* @G_spv_thread_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.thread.id9 = call i32 @llvm.spv.thread.id.i32(i32 1) + store i32 %spv.thread.id9, i32* @G_spv_thread_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.thread.id10 = call i32 @llvm.spv.thread.id.i32(i32 2) + store i32 %spv.thread.id10, i32* @G_spv_thread_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.num.workgroups11 = call i32 @llvm.spv.global.size.i32(i32 0) + store i32 %spv.num.workgroups11, i32* @G_spv_global_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.num.workgroups12 = call i32 @llvm.spv.global.size.i32(i32 1) + store i32 %spv.num.workgroups12, i32* @G_spv_global_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.num.workgroups13 = call i32 @llvm.spv.global.size.i32(i32 2) + store i32 %spv.num.workgroups13, i32* @G_spv_global_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.global.offset = call i32 @llvm.spv.global.offset.i32(i32 0) + store i32 %spv.global.offset, i32* @G_spv_global_offset_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.global.offset14 = call i32 @llvm.spv.global.offset.i32(i32 1) + store i32 %spv.global.offset14, i32* @G_spv_global_offset_1 ; CHECK: 
[[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.global.offset15 = call i32 @llvm.spv.global.offset.i32(i32 2) + store i32 %spv.global.offset15, i32* @G_spv_global_offset_2 ; CHECK: OpLoad %5 [[SubgroupSize]] %0 = call i32 @llvm.spv.subgroup.size() store i32 %0, ptr %ssize, align 4 diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll index dcdf8992ce1c4..26c2d866d14c7 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll @@ -34,6 +34,28 @@ target triple = "spirv64-unknown-unknown" ; CHECK: [[SubgroupId]] = OpVariable [[I32PTR]] Input ; CHECK: [[SubgroupLocalInvocationId]] = OpVariable [[I32PTR]] Input +@G_spv_num_workgroups_0 = global i64 0 +@G_spv_num_workgroups_1 = global i64 0 +@G_spv_num_workgroups_2 = global i64 0 +@G_spv_workgroup_size_0 = global i64 0 +@G_spv_workgroup_size_1 = global i64 0 +@G_spv_workgroup_size_2 = global i64 0 +@G_spv_group_id_0 = global i64 0 +@G_spv_group_id_1 = global i64 0 +@G_spv_group_id_2 = global i64 0 +@G_spv_thread_id_in_group_0 = global i64 0 +@G_spv_thread_id_in_group_1 = global i64 0 +@G_spv_thread_id_in_group_2 = global i64 0 +@G_spv_thread_id_0 = global i64 0 +@G_spv_thread_id_1 = global i64 0 +@G_spv_thread_id_2 = global i64 0 +@G_spv_global_size_0 = global i64 0 +@G_spv_global_size_1 = global i64 0 +@G_spv_global_size_2 = global i64 0 +@G_spv_global_offset_0 = global i64 0 +@G_spv_global_offset_1 = global i64 0 +@G_spv_global_offset_2 = global i64 0 + ; Function Attrs: convergent noinline norecurse nounwind optnone define spir_func void @test_id_and_range() { entry: @@ -45,66 +67,87 @@ entry: ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.num.workgroups = call i64 @llvm.spv.num.workgroups.i64(i32 0) + store i64 %spv.num.workgroups, i64* @G_spv_num_workgroups_0 ; CHECK: [[LD:%[0-9]*]] = 
OpLoad [[I64V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.num.workgroups1 = call i64 @llvm.spv.num.workgroups.i64(i32 1) + store i64 %spv.num.workgroups1, i64* @G_spv_num_workgroups_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.num.workgroups2 = call i64 @llvm.spv.num.workgroups.i64(i32 2) + store i64 %spv.num.workgroups2, i64* @G_spv_num_workgroups_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.workgroup.size = call i64 @llvm.spv.workgroup.size.i64(i32 0) + store i64 %spv.workgroup.size, i64* @G_spv_workgroup_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.workgroup.size3 = call i64 @llvm.spv.workgroup.size.i64(i32 1) + store i64 %spv.workgroup.size3, i64* @G_spv_workgroup_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.workgroup.size4 = call i64 @llvm.spv.workgroup.size.i64(i32 2) + store i64 %spv.workgroup.size4, i64* @G_spv_workgroup_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.group.id = call i64 @llvm.spv.group.id.i64(i32 0) + store i64 %spv.group.id, i64* @G_spv_group_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.group.id5 = call i64 @llvm.spv.group.id.i64(i32 1) + store i64 %spv.group.id5, i64* @G_spv_group_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.group.id6 = call i64 @llvm.spv.group.id.i64(i32 2) + store i64 %spv.group.id6, i64* @G_spv_group_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.thread.id.in.group = call i64 @llvm.spv.thread.id.in.group.i64(i32 0) + store i64 
%spv.thread.id.in.group, i64* @G_spv_thread_id_in_group_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.thread.id.in.group7 = call i64 @llvm.spv.thread.id.in.group.i64(i32 1) + store i64 %spv.thread.id.in.group7, i64* @G_spv_thread_id_in_group_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.thread.id.in.group8 = call i64 @llvm.spv.thread.id.in.group.i64(i32 2) + store i64 %spv.thread.id.in.group8, i64* @G_spv_thread_id_in_group_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.thread.id = call i64 @llvm.spv.thread.id.i64(i32 0) + store i64 %spv.thread.id, i64* @G_spv_thread_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.thread.id9 = call i64 @llvm.spv.thread.id.i64(i32 1) + store i64 %spv.thread.id9, i64* @G_spv_thread_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.thread.id10 = call i64 @llvm.spv.thread.id.i64(i32 2) + store i64 %spv.thread.id10, i64* @G_spv_thread_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.num.workgroups11 = call i64 @llvm.spv.global.size.i64(i32 0) + store i64 %spv.num.workgroups11, i64* @G_spv_global_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.num.workgroups12 = call i64 @llvm.spv.global.size.i64(i32 1) + store i64 %spv.num.workgroups12, i64* @G_spv_global_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.num.workgroups13 = call i64 @llvm.spv.global.size.i64(i32 2) + store i64 %spv.num.workgroups13, i64* @G_spv_global_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: 
OpCompositeExtract [[I64]] [[LD]] 0 %spv.global.offset = call i64 @llvm.spv.global.offset.i64(i32 0) + store i64 %spv.global.offset, i64* @G_spv_global_offset_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.global.offset14 = call i64 @llvm.spv.global.offset.i64(i32 1) + store i64 %spv.global.offset14, i64* @G_spv_global_offset_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.global.offset15 = call i64 @llvm.spv.global.offset.i64(i32 2) + store i64 %spv.global.offset15, i64* @G_spv_global_offset_2 ; CHECK: OpLoad %5 [[SubgroupSize]] %0 = call i32 @llvm.spv.subgroup.size() store i32 %0, ptr %ssize, align 4 diff --git a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll index 0c9b29de890d4..8dd9b387a6d84 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll @@ -81,17 +81,36 @@ @__spirv_BuiltInSubgroupId = external addrspace(1) global i32 @__spirv_BuiltInSubgroupLocalInvocationId = external addrspace(1) global i32 +@G_r1 = global i64 0 +@G_r2 = global i64 0 +@G_r3 = global i32 0 +@G_r4 = global i32 0 +@G_r5 = global i32 0 +@G_r6 = global i32 0 +@G_r7 = global i32 0 +@G_r8 = global i32 0 +@G_r9 = global i32 0 + define spir_kernel void @_Z1wv() { entry: %r1 = tail call spir_func i64 @get_global_linear_id() + store i64 %r1, i64* @G_r1 %r2 = tail call spir_func i64 @get_local_linear_id() + store i64 %r2, i64* @G_r2 %r3 = tail call spir_func i32 @get_work_dim() + store i32 %r3, i32* @G_r3 %r4 = tail call spir_func i32 @get_sub_group_size() + store i32 %r4, i32* @G_r4 %r5 = tail call spir_func i32 @get_max_sub_group_size() + store i32 %r5, i32* @G_r5 %r6 = tail call spir_func i32 @get_num_sub_groups() + store i32 %r6, i32* @G_r6 %r7 = tail call spir_func i32 @get_enqueued_num_sub_groups() + store i32 %r7, i32* @G_r7 %r8 = tail call 
spir_func i32 @get_sub_group_id() + store i32 %r8, i32* @G_r8 %r9 = tail call spir_func i32 @get_sub_group_local_id() + store i32 %r9, i32* @G_r9 ret void } diff --git a/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll b/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll index 3e0d0cc4cd8e2..d260c9f94d4ad 100644 --- a/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll +++ b/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll @@ -126,6 +126,7 @@ define spir_func i32 @test0() !dbg !17 { %14 = load ptr addrspace(4), ptr %11, align 4, !dbg !65 store ptr addrspace(4) %14, ptr %12, align 4, !dbg !64 #dbg_declare(ptr %13, !66, !DIExpression(DW_OP_constu, 0, DW_OP_swap, DW_OP_xderef), !70) + store [8 x i32] zeroinitializer, ptr %13, align 4 ret i32 0, !dbg !71 } @@ -169,6 +170,7 @@ define spir_func i32 @test1() !dbg !72 { %14 = load ptr addrspace(4), ptr %11, align 4, !dbg !97 store ptr addrspace(4) %14, ptr %12, align 4, !dbg !96 #dbg_declare(ptr %13, !98, !DIExpression(DW_OP_constu, 0, DW_OP_swap, DW_OP_xderef), !99) + store [8 x i32] zeroinitializer, ptr %13, align 4 ret i32 0, !dbg !100 } diff --git a/llvm/test/CodeGen/SPIRV/event-zero-const.ll b/llvm/test/CodeGen/SPIRV/event-zero-const.ll index 523d2ad9825f3..2bf8259e78785 100644 --- a/llvm/test/CodeGen/SPIRV/event-zero-const.ll +++ b/llvm/test/CodeGen/SPIRV/event-zero-const.ll @@ -12,11 +12,15 @@ ; CHECK: OpINotEqual %[[#]] %[[#]] %[[#LongNull]] ; CHECK: OpGroupAsyncCopy %[[#EventTy]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#EventNull]] +@G_r1 = global i1 0 +@G_e1 = global target("spirv.Event") poison define weak_odr dso_local spir_kernel void @foo(i64 %_arg_i, ptr addrspace(1) %_arg_ptr, ptr addrspace(3) %_arg_local) { entry: %r1 = icmp ne i64 %_arg_i, 0 + store i1 %r1, ptr @G_r1 %e1 = tail call spir_func target("spirv.Event") @__spirv_GroupAsyncCopy(i32 2, ptr addrspace(3) %_arg_local, ptr addrspace(1) %_arg_ptr, i64 1, i64 1, target("spirv.Event") zeroinitializer) + store 
target("spirv.Event") %e1, ptr @G_e1 ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll index e5736b88b63a3..a9a0d3358f8cc 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll @@ -11,15 +11,22 @@ @G1 = addrspace(1) constant { [3 x ptr addrspace(4)] } { [3 x ptr addrspace(4)] [ptr addrspace(4) null, ptr addrspace(4) addrspacecast (ptr @foo to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @bar to ptr addrspace(4))] } @G2 = addrspace(1) constant { [3 x ptr addrspace(4)] } { [3 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @bar to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @foo to ptr addrspace(4))] } +@G_r1_foo = global ptr addrspace(4) null +@G_r2_foo = global ptr addrspace(4) null +@G_r1_bar = global ptr addrspace(4) null + define void @foo(ptr addrspace(4) %p) { entry: %r1 = addrspacecast ptr @foo to ptr addrspace(4) + store ptr addrspace(4) %r1, ptr @G_r1_foo %r2 = addrspacecast ptr null to ptr addrspace(4) + store ptr addrspace(4) %r2, ptr @G_r2_foo ret void } define void @bar(ptr addrspace(4) %p) { entry: %r1 = addrspacecast ptr @bar to ptr addrspace(4) + store ptr addrspace(4) %r1, ptr @G_r1_bar ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll index 22668e71fb257..92652f1faefc0 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll @@ -12,11 +12,16 @@ target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" target triple = "spir64-unknown-unknown" 
+@G1 = global bfloat 0.0 +@G2 = global <2 x bfloat> zeroinitializer + define spir_kernel void @test() { entry: %addr1 = alloca bfloat %addr2 = alloca <2 x bfloat> %data1 = load bfloat, ptr %addr1 %data2 = load <2 x bfloat>, ptr %addr2 + store bfloat %data1, ptr @G1 + store <2 x bfloat> %data2, ptr @G2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll index d3fe9e43450cd..81497f26f1aef 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll @@ -79,6 +79,54 @@ ; CHECK: OpDecorate %[[#maxResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform ; CHECK: OpDecorate %[[#maxCommonResV]] FPFastMathMode NotNaN|NotInf +@G_addRes = global float 0.0 +@G_subRes = global float 0.0 +@G_mulRes = global float 0.0 +@G_divRes = global float 0.0 +@G_remRes = global float 0.0 +@G_negRes = global float 0.0 +@G_oeqRes = global i1 0 +@G_oneRes = global i1 0 +@G_oltRes = global i1 0 +@G_ogtRes = global i1 0 +@G_oleRes = global i1 0 +@G_ogeRes = global i1 0 +@G_ordRes = global i1 0 +@G_ueqRes = global i1 0 +@G_uneRes = global i1 0 +@G_ultRes = global i1 0 +@G_ugtRes = global i1 0 +@G_uleRes = global i1 0 +@G_ugeRes = global i1 0 +@G_unoRes = global i1 0 +@G_modRes = global float 0.0 +@G_maxRes = global float 0.0 +@G_maxCommonRes = global float 0.0 + +@G_addResV = global <2 x float> zeroinitializer +@G_subResV = global <2 x float> zeroinitializer +@G_mulResV = global <2 x float> zeroinitializer +@G_divResV = global <2 x float> zeroinitializer +@G_remResV = global <2 x float> zeroinitializer +@G_negResV = global <2 x float> zeroinitializer +@G_oeqResV = global <2 x i1> zeroinitializer +@G_oneResV = global <2 x i1> zeroinitializer +@G_oltResV = global <2 x i1> zeroinitializer +@G_ogtResV = global <2 x i1> zeroinitializer 
+@G_oleResV = global <2 x i1> zeroinitializer +@G_ogeResV = global <2 x i1> zeroinitializer +@G_ordResV = global <2 x i1> zeroinitializer +@G_ueqResV = global <2 x i1> zeroinitializer +@G_uneResV = global <2 x i1> zeroinitializer +@G_ultResV = global <2 x i1> zeroinitializer +@G_ugtResV = global <2 x i1> zeroinitializer +@G_uleResV = global <2 x i1> zeroinitializer +@G_ugeResV = global <2 x i1> zeroinitializer +@G_unoResV = global <2 x i1> zeroinitializer +@G_modResV = global <2 x float> zeroinitializer +@G_maxResV = global <2 x float> zeroinitializer +@G_maxCommonResV = global <2 x float> zeroinitializer + ; Function Attrs: convergent mustprogress nofree nounwind willreturn memory(none) declare spir_func float @_Z4fmodff(float, float) declare dso_local spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 @@ -91,55 +139,101 @@ declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_ define weak_odr dso_local spir_kernel void @foo(float %1, float %2) { entry: %addRes = fadd float %1, %2 + store float %addRes, float* @G_addRes %subRes = fsub nnan float %1, %2 + store float %subRes, float* @G_subRes %mulRes = fmul ninf float %1, %2 + store float %mulRes, float* @G_mulRes %divRes = fdiv nsz float %1, %2 + store float %divRes, float* @G_divRes %remRes = frem arcp float %1, %2 + store float %remRes, float* @G_remRes %negRes = fneg fast float %1 + store float %negRes, float* @G_negRes %oeqRes = fcmp nnan ninf oeq float %1, %2 + store i1 %oeqRes, i1* @G_oeqRes %oneRes = fcmp one float %1, %2, !spirv.Decorations !3 + store i1 %oneRes, i1* @G_oneRes %oltRes = fcmp nnan olt float %1, %2, !spirv.Decorations !3 + store i1 %oltRes, i1* @G_oltRes %ogtRes = fcmp ninf ogt float %1, %2, !spirv.Decorations !3 + store i1 %ogtRes, i1* @G_ogtRes %oleRes = fcmp nsz ole float %1, %2, !spirv.Decorations !3 + store i1 %oleRes, i1* @G_oleRes %ogeRes = fcmp arcp oge 
float %1, %2, !spirv.Decorations !3 + store i1 %ogeRes, i1* @G_ogeRes %ordRes = fcmp fast ord float %1, %2, !spirv.Decorations !3 + store i1 %ordRes, i1* @G_ordRes %ueqRes = fcmp nnan ninf ueq float %1, %2, !spirv.Decorations !3 + store i1 %ueqRes, i1* @G_ueqRes %uneRes = fcmp une float %1, %2, !spirv.Decorations !3 + store i1 %uneRes, i1* @G_uneRes %ultRes = fcmp ult float %1, %2, !spirv.Decorations !3 + store i1 %ultRes, i1* @G_ultRes %ugtRes = fcmp ugt float %1, %2, !spirv.Decorations !3 + store i1 %ugtRes, i1* @G_ugtRes %uleRes = fcmp ule float %1, %2, !spirv.Decorations !3 + store i1 %uleRes, i1* @G_uleRes %ugeRes = fcmp uge float %1, %2, !spirv.Decorations !3 + store i1 %ugeRes, i1* @G_ugeRes %unoRes = fcmp uno float %1, %2, !spirv.Decorations !3 + store i1 %unoRes, i1* @G_unoRes %modRes = call spir_func float @_Z4fmodff(float %1, float %2) + store float %modRes, float* @G_modRes %maxRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + store float %maxRes, float* @G_maxRes %maxCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + store float %maxCommonRes, float* @G_maxCommonRes ret void } define weak_odr dso_local spir_kernel void @fooV(<2 x float> %v1, <2 x float> %v2) { %addResV = fadd <2 x float> %v1, %v2 + store <2 x float> %addResV, <2 x float>* @G_addResV %subResV = fsub nnan <2 x float> %v1, %v2 + store <2 x float> %subResV, <2 x float>* @G_subResV %mulResV = fmul ninf <2 x float> %v1, %v2 + store <2 x float> %mulResV, <2 x float>* @G_mulResV %divResV = fdiv nsz <2 x float> %v1, %v2 + store <2 x float> %divResV, <2 x float>* @G_divResV %remResV = frem arcp <2 x float> %v1, %v2 + store <2 x float> %remResV, <2 x float>* @G_remResV %negResV = fneg fast <2 x float> %v1 + store <2 x float> %negResV, <2 x float>* @G_negResV %oeqResV = fcmp nnan ninf 
oeq <2 x float> %v1, %v2 + store <2 x i1> %oeqResV, <2 x i1>* @G_oeqResV %oneResV = fcmp one <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oneResV, <2 x i1>* @G_oneResV %oltResV = fcmp nnan olt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oltResV, <2 x i1>* @G_oltResV %ogtResV = fcmp ninf ogt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ogtResV, <2 x i1>* @G_ogtResV %oleResV = fcmp nsz ole <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oleResV, <2 x i1>* @G_oleResV %ogeResV = fcmp arcp oge <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ogeResV, <2 x i1>* @G_ogeResV %ordResV = fcmp fast ord <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ordResV, <2 x i1>* @G_ordResV %ueqResV = fcmp nnan ninf ueq <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ueqResV, <2 x i1>* @G_ueqResV %uneResV = fcmp une <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %uneResV, <2 x i1>* @G_uneResV %ultResV = fcmp ult <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ultResV, <2 x i1>* @G_ultResV %ugtResV = fcmp ugt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ugtResV, <2 x i1>* @G_ugtResV %uleResV = fcmp ule <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %uleResV, <2 x i1>* @G_uleResV %ugeResV = fcmp uge <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ugeResV, <2 x i1>* @G_ugeResV %unoResV = fcmp uno <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %unoResV, <2 x i1>* @G_unoResV %modResV = call spir_func <2 x float> @_Z4fmodDv2_fDv2_f(<2 x float> %v1, <2 x float> %v2) + store <2 x float> %modResV, <2 x float>* @G_modResV %maxResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + store <2 x float> %maxResV, <2 x float>* @G_maxResV %maxCommonResV = tail call spir_func 
noundef <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + store <2 x float> %maxCommonResV, <2 x float>* @G_maxCommonResV ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll new file mode 100644 index 0000000000000..879aab4de4808 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll @@ -0,0 +1,22 @@ +; RUN: llc -mtriple=spirv64-- --spirv-ext=all < %s | FileCheck %s --check-prefix=CHECK +; No need to validate the output of the first command, we just want to ensure that we are on a path that triggers the use of SPV_KHR_float_controls2 + +; RUN: llc -mtriple=spirv64-amd-amdhsa --spirv-ext=all < %s | FileCheck %s --check-prefix=CHECK-AMD +; RUN: %if spirv-tools %{ llc -mtriple=spirv64-amd-amdhsa --spirv-ext=all < %s -filetype=obj | spirv-val %} + +; RUN: llc -mtriple=spirv64-amd-amdhsa --spirv-ext=+SPV_KHR_float_controls2 < %s | FileCheck %s --check-prefix=CHECK-AMD +; RUN: %if spirv-tools %{ llc -mtriple=spirv64-amd-amdhsa --spirv-ext=+SPV_KHR_float_controls2 < %s -filetype=obj | spirv-val %} + +; Check that SPV_KHR_float_controls2 is not present when the target is AMD. +; AMD's SPIRV implementation uses the translator to get bitcode from SPIRV, +; which at the moment doesn't implement the SPV_KHR_float_controls2 extension. 
+ +; CHECK: SPV_KHR_float_controls2 +; CHECK-AMD-NOT: SPV_KHR_float_controls2 + +define spir_kernel void @foo(float %a, float %b) { +entry: + ; Use contract to trigger a use of SPV_KHR_float_controls2 + %r1 = fadd contract float %a, %b + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll index 4db0ba33d52c9..face4a9f5e615 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll @@ -2,10 +2,15 @@ ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=KHR %s -o - | FileCheck %s ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=khr %s -o - | FileCheck %s +@G = global i32 0 + define i6 @foo() { %call = tail call i32 @llvm.bitreverse.i32(i32 42) + store i32 %call, ptr @G ret i6 2 } ; CHECK-NOT: OpExtension "SPV_INTEL_arbitrary_precision_integers" ; CHECK-DAG: OpExtension "SPV_KHR_bit_instructions" + +declare i32 @llvm.bitreverse.i32(i32) diff --git a/llvm/test/CodeGen/SPIRV/freeze.ll b/llvm/test/CodeGen/SPIRV/freeze.ll index 9077d2ede72a9..4f7e7794ed03b 100644 --- a/llvm/test/CodeGen/SPIRV/freeze.ll +++ b/llvm/test/CodeGen/SPIRV/freeze.ll @@ -1,15 +1,15 @@ ; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s ; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} -; CHECK: OpName %[[Arg1:.*]] "arg1" -; CHECK: OpName %[[Arg2:.*]] "arg2" -; CHECK: OpName %[[NotAStaticPoison:.*]] "poison1" -; CHECK: OpName %[[NotAStaticPoison]] "nil0" -; CHECK: OpName %[[StaticPoisonIntFreeze:.*]] "nil1" -; CHECK: OpName %[[StaticPoisonFloatFreeze:.*]] "nil2" -; CHECK: OpName %[[Arg1]] "val1" -; CHECK: OpName %[[Const100:.*]] "val2" -; CHECK: OpName %[[Const100]] "val3" +; CHECK-DAG: OpName %[[Arg1:.*]] "arg1" +; CHECK-DAG: OpName %[[Arg2:.*]] "arg2" +; 
CHECK-DAG: OpName %[[NotAStaticPoison:.*]] "poison1" +; CHECK-DAG: OpName %[[NotAStaticPoison]] "nil0" +; CHECK-DAG: OpName %[[StaticPoisonIntFreeze:.*]] "nil1" +; CHECK-DAG: OpName %[[StaticPoisonFloatFreeze:.*]] "nil2" +; CHECK-DAG: OpName %[[Arg1]] "val1" +; CHECK-DAG: OpName %[[Const100:.*]] "val2" +; CHECK-DAG: OpName %[[Const100]] "val3" ; CHECK: OpDecorate ; CHECK-DAG: %[[FloatTy:.*]] = OpTypeFloat 32 ; CHECK-DAG: %[[ShortTy:.*]] = OpTypeInt 16 0 @@ -18,17 +18,37 @@ ; CHECK-DAG: %[[Undef32:.*]] = OpUndef %[[IntTy]] ; CHECK-DAG: %[[UndefFloat:.*]] = OpUndef %[[FloatTy]] ; CHECK-DAG: %[[Const100]] = OpConstant %[[IntTy]] 100 -; CHECK: %[[Arg1]] = OpFunctionParameter %[[FloatTy]] -; CHECK: %[[NotAStaticPoison]] = OpIAdd %[[ShortTy]] %[[Arg2]] %[[Undef16]] -define spir_func void @foo(float %arg1, i16 %arg2) { +define spir_func i16 @test_nil0(i16 %arg2) { entry: +; CHECK: %[[NotAStaticPoison]] = OpIAdd %[[ShortTy]] %[[Arg2]] %[[Undef16]] %poison1 = add i16 %arg2, undef %nil0 = freeze i16 %poison1 + ret i16 %nil0 +} + +define spir_func i32 @test_nil1() { +entry: %nil1 = freeze i32 undef + ret i32 %nil1 +} + +define spir_func float @test_nil2() { +entry: %nil2 = freeze float poison + ret float %nil2 +} + +define spir_func float @freeze_float(float %arg1) { +entry: +; CHECK: %[[Arg1]] = OpFunctionParameter %[[FloatTy]] %val1 = freeze float %arg1 + ret float %val1 +} + +define spir_func i32 @foo() { +entry: %val2 = freeze i32 100 %val3 = freeze i32 %val2 - ret void + ret i32 %val3 } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll index a97492b8453ea..a15d628cc3614 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll @@ -63,7 +63,7 @@ entry: ; CHECK: %[[#a_high:]] = OpVectorShuffle %[[#vec2_int_32]] %[[#a]] %[[#undef_v4i32]] 1 3 ; CHECK: %[[#b_low:]] = OpVectorShuffle %[[#vec2_int_32]] %[[#b]] %[[#undef_v4i32]] 0 2 ; CHECK: 
%[[#b_high:]] = OpVectorShuffle %[[#vec2_int_32]] %[[#b]] %[[#undef_v4i32]] 1 3 -; CHECK: %[[#iaddcarry:]] = OpIAddCarry %[[#struct_v2i32_v2i32]] %[[#a_low]] %[[#vec2_int_32]] +; CHECK: %[[#iaddcarry:]] = OpIAddCarry %[[#struct_v2i32_v2i32]] %[[#a_low]] %[[#b_low]] ; CHECK: %[[#lowsum:]] = OpCompositeExtract %[[#vec2_int_32]] %[[#iaddcarry]] 0 ; CHECK: %[[#carry:]] = OpCompositeExtract %[[#vec2_int_32]] %[[#iaddcarry]] 1 ; CHECK: %[[#carry_ne0:]] = OpINotEqual %[[#vec2_bool]] %[[#carry]] %[[#const_v2i32_0_0]] diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll index 4a15fa8b14537..75fac211f1108 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll @@ -3,24 +3,25 @@ ; CHECK: OpExtInstImport "GLSL.std.450" +@i = global i32 0, align 4 +@absi = global i32 0, align 4 +@f = global float 0.0, align 4 +@absf = global float 0.0, align 4 + define void @main() #1 { entry: - %i = alloca i32, align 4 - %absi = alloca i32, align 4 - %f = alloca float, align 4 - %absf = alloca float, align 4 - %0 = load i32, ptr %i, align 4 + %0 = load i32, ptr @i, align 4 ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SAbs %[[#]] %elt.abs = call i32 @llvm.abs.i32(i32 %0, i1 false) - store i32 %elt.abs, ptr %absi, align 4 - %1 = load float, ptr %f, align 4 + store i32 %elt.abs, ptr @absi, align 4 + %1 = load float, ptr @f, align 4 ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FAbs %[[#]] %elt.abs1 = call float @llvm.fabs.f32(float %1) - store float %elt.abs1, ptr %absf, align 4 + store float %elt.abs1, ptr @absf, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll index 7583066c01cf8..dceaa8c209957 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll @@ -7,21 +7,23 @@ ; CHECK: %[[#v4float:]] = OpTypeVector %[[#float]] 4 ; CHECK: 
%[[#float_0_30103001:]] = OpConstant %[[#float]] 0.30103000998497009 +@logf = global float 0.0, align 4 +@logf4 = global <4 x float> zeroinitializer, align 16 + define void @main(float %f, <4 x float> %f4) { entry: ; CHECK-DAG: %[[#f:]] = OpFunctionParameter %[[#float]] ; CHECK-DAG: %[[#f4:]] = OpFunctionParameter %[[#v4float]] - %logf = alloca float, align 4 - %logf4 = alloca <4 x float>, align 16 - ; CHECK: %[[#log2:]] = OpExtInst %[[#float]] %[[#extinst]] Log2 %[[#f]] ; CHECK: %[[#res:]] = OpFMul %[[#float]] %[[#log2]] %[[#float_0_30103001]] %elt.log10 = call float @llvm.log10.f32(float %f) + store float %elt.log10, ptr @logf, align 4 ; CHECK: %[[#log2:]] = OpExtInst %[[#v4float]] %[[#extinst]] Log2 %[[#f4]] ; CHECK: %[[#res:]] = OpVectorTimesScalar %[[#v4float]] %[[#log2]] %[[#float_0_30103001]] %elt.log101 = call <4 x float> @llvm.log10.v4f32(<4 x float> %f4) + store <4 x float> %elt.log101, ptr @logf4, align 16 ret void } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll new file mode 100644 index 0000000000000..5d45178715d70 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll @@ -0,0 +1,77 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[PTR_INT:[0-9]+]] = OpTypePointer Uniform %[[INT]] +; CHECK-DAG: %[[INT64:[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: %[[CONST_4:[0-9]+]] = OpConstant %[[INT]] 4{{$}} + +; CHECK-DAG: %[[ARRAY:[0-9]+]] = OpTypeArray %[[VEC4]] %[[CONST_4]] +; CHECK-DAG: %[[PTR_ARRAY:[0-9]+]] = OpTypePointer Uniform %[[ARRAY]] + +; CHECK-DAG: %[[STRUCT_INNER:[0-9]+]] = 
OpTypeStruct %[[ARRAY]] %[[INT]] +; CHECK-DAG: %[[STRUCT_CBUFFER:[0-9]+]] = OpTypeStruct %[[STRUCT_INNER]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_CBUFFER]] + +; CHECK-DAG: OpDecorate %[[ARRAY]] ArrayStride 16 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 1 Offset 64 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_CBUFFER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[STRUCT_CBUFFER]] Block + +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} + +; CHECK: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ [4 x <4 x float>], i32 }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@colors = external hidden local_unnamed_addr addrspace(12) global [4 x <4 x float>], align 16 +@index = external hidden local_unnamed_addr addrspace(12) global i32, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; Get pointers to the two elements of the cbuffer +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_ARRAY_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_ARRAY]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_INT_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_INT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) 
%MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + +; CHECK: %[[VAL_INT:[0-9]+]] = OpLoad %[[INT]] %[[PTR_INT_ACCESS]] Aligned 4 + %1 = load i32, ptr addrspace(12) @index, align 4 + +; CHECK: %[[VAL_INT64:[0-9]+]] = OpSConvert %[[INT64]] %[[VAL_INT]] + %idxprom.i = sext i32 %1 to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_ARRAY_ACCESS]] %[[VAL_INT64]] + %arrayidx.i = getelementptr inbounds <4 x float>, ptr addrspace(12) @colors, i64 %idxprom.i + +; CHECK: %[[VAL_ELEM:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_ELEM]] Aligned 16 + %2 = load <4 x float>, ptr addrspace(12) %arrayidx.i, align 16 + +; CHECK: OpStore {{%[0-9]+}} %[[VAL_ELEM]] Aligned 16 + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %2, ptr addrspace(11) %3, align 16 + ret void +} + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @colors, ptr addrspace(12) @index} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll new file mode 100644 index 0000000000000..fc12f0f0592fe --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll @@ -0,0 +1,90 @@ +; RUN: llc -O0 
-mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpDecorate %[[ARRAY:[0-9]+]] ArrayStride 16 +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT]] 1 Offset 52 +; CHECK-DAG: OpMemberDecorate %[[WRAPPER:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[WRAPPER]] Block +; CHECK-DAG: OpMemberDecorate %[[STRUCT:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 1 Offset 4 + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[STRUCT]] = OpTypeStruct %[[FLOAT]] +; CHECK-DAG: %[[I8:[0-9]+]] = OpTypeInt 8 0 +; CHECK-DAG: %[[STRUCT_PAD]] = OpTypeStruct %[[STRUCT]] %[[I8]] +; CHECK-DAG: %[[UINT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[CONST_4:[0-9]+]] = OpConstant %[[UINT]] 4 +; CHECK-DAG: %[[ARRAY]] = OpTypeArray %[[STRUCT_PAD]] %[[CONST_4]] +; CHECK-DAG: %[[CBLAYOUT]] = OpTypeStruct %[[ARRAY]] %[[FLOAT]] +; CHECK-DAG: %[[WRAPPER]] = OpTypeStruct %[[CBLAYOUT]] +; CHECK-DAG: %[[PTR_WRAPPER:[0-9]+]] = OpTypePointer Uniform %[[WRAPPER]] +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[UINT]] 0 +; CHECK-DAG: %[[MYCBUFFER:[0-9]+]] = OpVariable %[[PTR_WRAPPER]] Uniform + +; CHECK-DAG: %[[I64:[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: %[[STRUCT2:[0-9]+]] = OpTypeStruct %[[I64]] %[[UINT]] +; CHECK-DAG: %[[CONST_3:[0-9]+]] = OpConstant %[[UINT]] 3 +; CHECK-DAG: %[[ARRAY2:[0-9]+]] = OpTypeArray %[[STRUCT2]] %[[CONST_3]] +; CHECK-DAG: %[[CBLAYOUT2:[0-9]+]] = OpTypeStruct %[[ARRAY2]] %[[I64]] +; CHECK-DAG: %[[PTR_PRIVATE:[0-9]+]] = OpTypePointer Private %[[CBLAYOUT2]] +; CHECK-DAG: %[[MYPRIVATEVAR:[0-9]+]] = OpVariable %[[PTR_PRIVATE]] Private + +%__cblayout_MyCBuffer = type <{ <{ [3 x <{ %OrigType, target("spirv.Padding", 12) }>], %OrigType }>, float }> +%OrigType = type <{ float }> + +%__cblayout_MyCBuffer2 
= type <{ [ 3 x <{ i64, i32 }> ], i64 }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@myPrivateVar = internal addrspace(10) global %__cblayout_MyCBuffer2 poison + +@myArray = external hidden local_unnamed_addr addrspace(12) global <{ [3 x <{ %OrigType, target("spirv.Padding", 12) }>], %OrigType }>, align 1 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[BUFFER_HANDLE:[0-9]+]] = OpCopyObject %[[PTR_WRAPPER]] %[[MYCBUFFER]] +; CHECK: %[[ACCESS_ARRAY:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[BUFFER_HANDLE]] %[[ZERO]] %[[ZERO]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_f32_5_2_0_0_2_1t(i32 1, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %rem.i = and i32 %1, 3 + +; CHECK: %[[IDX_CONV:[0-9]+]] = OpUConvert {{.*}} + %idxprom.i = zext nneg i32 %rem.i to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[ACCESS_ARRAY]] %[[IDX_CONV]] + %cbufferidx.i = getelementptr <{ %OrigType, target("spirv.Padding", 12) }>, ptr addrspace(12) @myArray, i64 %idxprom.i + +; CHECK: %[[PTR_FIELD:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[PTR_ELEM]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[VAL_FLOAT:[0-9]+]] = 
OpLoad %[[FLOAT]] %[[PTR_FIELD]] Aligned 4 + %2 = load float, ptr addrspace(12) %cbufferidx.i, align 4 + + %val = load i64, ptr addrspace(10) getelementptr (%__cblayout_MyCBuffer2, ptr addrspace(10) @myPrivateVar, i32 0, i32 1), align 8 + %val.float = sitofp i64 %val to float + + %vecinit4.i = insertelement <4 x float> poison, float %2, i64 0 + %vecinit4.i.2 = insertelement <4 x float> %vecinit4.i, float %val.float, i64 1 + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_f32_5_2_0_0_2_1t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %0, i32 0) + store <4 x float> %vecinit4.i.2, ptr addrspace(11) %3, align 16 +; CHECK: OpImageWrite {{%[0-9]+}} {{%[0-9]+}} {{%[0-9]+}} + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_f32_5_2_0_0_2_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_f32_5_2_0_0_2_1t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = distinct !{ptr @MyCBuffer.cb, ptr addrspace(12) @myArray, null} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll new file mode 100644 index 0000000000000..fb93d53b337b3 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll @@ -0,0 +1,74 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC3:[0-9]+]] = OpTypeVector %[[FLOAT]] 3 +; CHECK-DAG: %[[I8:[0-9]+]] = OpTypeInt 8 0 +; CHECK-DAG: %[[STRUCT_PAD:[0-9]+]] = OpTypeStruct %[[VEC3]] %[[I8]] +; CHECK-DAG:
%[[UINT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[CONST_3:[0-9]+]] = OpConstant %[[UINT]] 3 +; CHECK-DAG: %[[ARRAY:[0-9]+]] = OpTypeArray %[[STRUCT_PAD]] %[[CONST_3]] +; CHECK-DAG: %[[CBLAYOUT:[0-9]+]] = OpTypeStruct %[[ARRAY]] +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT]] 0 Offset 0 +; CHECK-DAG: %[[WRAPPER:[0-9]+]] = OpTypeStruct %[[CBLAYOUT]] +; CHECK-DAG: %[[PTR_WRAPPER:[0-9]+]] = OpTypePointer Uniform %[[WRAPPER]] +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[UINT]] 0 +; CHECK-DAG: %[[MYCBUFFER:[0-9]+]] = OpVariable %[[PTR_WRAPPER]] Uniform + + +; TODO(168401): This array stride and offset of element 1 are incorrect. This +; is an issue with how 3 element vectors are handled. +; CHECK-DAG: OpDecorate %[[ARRAY]] ArrayStride 20 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[WRAPPER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[WRAPPER]] Block +%__cblayout_MyCBuffer = type <{ <{ [2 x <{ <3 x float>, target("spirv.Padding", 4) }>], <3 x float> }> }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@myArray = external hidden local_unnamed_addr addrspace(12) global <{ [2 x <{ <3 x float>, target("spirv.Padding", 4) }>], <3 x float> }>, align 16 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[BUFFER_HANDLE:[0-9]+]] = OpCopyObject %[[PTR_WRAPPER]] %[[MYCBUFFER]] +; CHECK: %[[ACCESS_ARRAY:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[BUFFER_HANDLE]] %[[ZERO]] %[[ZERO]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) 
@llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + +; CHECK: %[[IDX:[0-9]+]] = OpUMod %[[UINT]] {{%[0-9]+}} %[[CONST_3]] + %rem.i = urem i32 %1, 3 + +; CHECK: %[[IDX_CONV:[0-9]+]] = OpUConvert {{.*}} %[[IDX]] + %idxprom.i = zext nneg i32 %rem.i to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[ACCESS_ARRAY]] %[[IDX_CONV]] + %cbufferidx.i = getelementptr <{ <3 x float>, target("spirv.Padding", 4) }>, ptr addrspace(12) @myArray, i64 %idxprom.i + +; CHECK: %[[PTR_FIELD:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[PTR_ELEM]] {{.*}} +; CHECK: %[[VAL_VEC3:[0-9]+]] = OpLoad %[[VEC3]] %[[PTR_FIELD]] Aligned 16 + %2 = load <3 x float>, ptr addrspace(12) %cbufferidx.i, align 16 + + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3f32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) %0, i32 %1) + store <3 x float> %2, ptr addrspace(11) %3, align 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3f32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @myArray} diff --git 
a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll new file mode 100644 index 0000000000000..1dd2c92bca09d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll @@ -0,0 +1,73 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_FLOAT:[0-9]+]] = OpTypePointer Uniform %[[FLOAT]] +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[STRUCT:[0-9]+]] = OpTypeStruct %[[VEC4]] %[[FLOAT]] +; CHECK-DAG: %[[CBUFFER_TYPE:[0-9]+]] = OpTypeStruct %[[STRUCT]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[CBUFFER_TYPE]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} + +; CHECK-DAG: OpMemberDecorate %[[STRUCT]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[CBUFFER_TYPE]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[CBUFFER_TYPE]] Block + +; CHECK-DAG: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ <4 x float>, float }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@color = external hidden local_unnamed_addr addrspace(12) global <4 x float>, align 16 +@factor = external hidden local_unnamed_addr addrspace(12) global float, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) 
@llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_VEC4_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_FLOAT_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_FLOAT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %2 = tail call i32 @llvm.spv.thread.id.i32(i32 1) + %conv.i = uitofp i32 %1 to float + %conv2.i = uitofp i32 %2 to float + %3 = insertelement <4 x float> , float %conv.i, i64 0 + %vecinit5.i = insertelement <4 x float> %3, float %conv2.i, i64 1 + +; CHECK: %[[VAL_VEC4:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_VEC4_ACCESS]] Aligned 16 + %4 = load <4 x float>, ptr addrspace(12) @color, align 16 + %mul.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %vecinit5.i, %4 + +; CHECK: %[[VAL_FLOAT:[0-9]+]] = OpLoad %[[FLOAT]] %[[PTR_FLOAT_ACCESS]] Aligned 4 + %5 = load float, ptr addrspace(12) @factor, align 4 + + %splat.splatinsert.i = insertelement <4 x float> poison, float %5, i64 0 + %splat.splat.i = shufflevector <4 x float> %splat.splatinsert.i, <4 x float> poison, <4 x i32> zeroinitializer + %mul6.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %mul.i, %splat.splat.i + %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) 
@llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %mul6.i, ptr addrspace(11) %6, align 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @color, ptr addrspace(12) @factor} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll new file mode 100644 index 0000000000000..60512fe3ed718 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll @@ -0,0 +1,158 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} + +; CHECK-DAG: %[[STRUCT_MATRIX:[0-9]+]] = OpTypeStruct %[[VEC4]] %[[VEC4]] %[[VEC4]] %[[VEC4]] +; CHECK-DAG: %[[PTR_MATRIX:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_MATRIX]] +; CHECK-DAG: %[[PTR_FLOAT:[0-9]+]] = OpTypePointer Uniform %[[FLOAT]] + +; CHECK-DAG: %[[STRUCT_MYSTRUCT:[0-9]+]] = OpTypeStruct %[[STRUCT_MATRIX]] %[[STRUCT_MATRIX]] %[[STRUCT_MATRIX]] + +; CHECK-DAG: %[[PTR_MYSTRUCT:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_MYSTRUCT]] +; CHECK-DAG: %[[STRUCT_INNER:[0-9]+]] = 
OpTypeStruct %[[STRUCT_MYSTRUCT]] %[[FLOAT]] + +; CHECK-DAG: %[[STRUCT_CBUFFER:[0-9]+]] = OpTypeStruct %[[STRUCT_INNER]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_CBUFFER]] +; CHECK-DAG: %[[INT64:[0-9]+]] = OpTypeInt 64 0 + +; CHECK-DAG: OpMemberDecorate %[[STRUCT_CBUFFER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[STRUCT_CBUFFER]] Block +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 1 Offset 192 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 1 Offset 64 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 2 Offset 128 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 2 Offset 32 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 3 Offset 48 + +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} +; CHECK-DAG: %[[ZERO_64:[0-9]+]] = OpConstant %[[INT64]] 0{{$}} +; CHECK-DAG: %[[ONE_64:[0-9]+]] = OpConstant %[[INT64]] 1{{$}} +; CHECK-DAG: %[[TWO_64:[0-9]+]] = OpConstant %[[INT64]] 2{{$}} +; CHECK-DAG: %[[THREE_64:[0-9]+]] = OpConstant %[[INT64]] 3{{$}} + +; CHECK: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ %MyStruct, float }> +%MyStruct = type <{ %MyMatrix, %MyMatrix, %MyMatrix }> +%MyMatrix = type <{ <4 x float>, <4 x float>, <4 x float>, <4 x float> }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@transforms = external hidden local_unnamed_addr addrspace(12) global %MyStruct, align 1 +@blend = external hidden local_unnamed_addr addrspace(12) global float, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", 
%__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) + +define void @main() #3 { +entry: +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_STRUCT:[0-9]+]] = OpAccessChain %[[PTR_MYSTRUCT]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_FLOAT_VAL:[0-9]+]] = OpAccessChain %[[PTR_FLOAT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %2 = tail call i32 @llvm.spv.thread.id.i32(i32 1) + %conv.i = uitofp i32 %1 to float + %conv2.i = uitofp i32 %2 to float + %3 = insertelement <4 x float> poison, float %conv.i, i64 0 + +; CHECK: %[[PTR_M0_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[VAL_M0_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V0]] Aligned 16 + %4 = load <4 x float>, ptr addrspace(12) @transforms, align 16 + +; CHECK: %[[PTR_M0_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO_64]] %[[ONE_64]] +; CHECK: %[[VAL_M0_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V1]] Aligned 16 + %5 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 16), align 16 + +; CHECK: %[[PTR_M0_V3:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO_64]] %[[THREE_64]] +; CHECK: 
%[[VAL_M0_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V3]] Aligned 16 + %6 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 48), align 16 + + %splat.splat.i18.i = shufflevector <4 x float> %3, <4 x float> poison, <4 x i32> zeroinitializer + %7 = insertelement <4 x float> poison, float %conv2.i, i64 0 + %splat.splat2.i19.i = shufflevector <4 x float> %7, <4 x float> poison, <4 x i32> zeroinitializer + %mul3.i20.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i19.i, %5 + %8 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i18.i, <4 x float> nofpclass(nan inf) %4, <4 x float> %mul3.i20.i) + %9 = fadd reassoc nnan ninf nsz arcp afn <4 x float> %8, %6 +; CHECK: %[[PTR_M1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_MATRIX]] %[[PTR_STRUCT]] %[[ONE_64]] +; CHECK: %[[PTR_M1_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_M1]] %[[ZERO]] +; CHECK: %[[VAL_M1_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V0]] Aligned 16 + %10 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 64), align 16 +; CHECK: %[[PTR_M1_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[ONE_64]] +; CHECK: %[[VAL_M1_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V1]] Aligned 16 + %11 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 80), align 16 +; CHECK: %[[PTR_M1_V2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[TWO_64]] +; CHECK: %[[VAL_M1_V2:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V2]] Aligned 16 + %12 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 96), align 16 +; CHECK: %[[PTR_M1_V3:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[THREE_64]] +; CHECK: %[[VAL_M1_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V3]] Aligned 16 + %13 = load <4 x float>, 
ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 112), align 16 + %splat.splat.i13.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> zeroinitializer + %splat.splat2.i14.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + %mul3.i15.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i14.i, %11 + %14 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i13.i, <4 x float> nofpclass(nan inf) %10, <4 x float> %mul3.i15.i) + %splat.splat5.i16.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2> + %15 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat5.i16.i, <4 x float> nofpclass(nan inf) %12, <4 x float> %14) + %splat.splat7.i17.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3> + %16 = tail call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat7.i17.i, <4 x float> nofpclass(nan inf) %13, <4 x float> %15) +; CHECK: %[[PTR_M2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_MATRIX]] %[[PTR_STRUCT]] %[[TWO_64]] +; CHECK: %[[PTR_M2_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_M2]] %[[ZERO]] +; CHECK: %[[VAL_M2_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V0]] Aligned 16 + %17 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 128), align 16 +; CHECK: %[[PTR_M2_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[ONE_64]] +; CHECK: %[[VAL_M2_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V1]] Aligned 16 + %18 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 144), align 16 +; CHECK: %[[PTR_M2_V2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[TWO_64]] +; CHECK: %[[VAL_M2_V2:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V2]] Aligned 16 + %19 = load <4 x float>, ptr addrspace(12) getelementptr
inbounds nuw (i8, ptr addrspace(12) @transforms, i64 160), align 16 +; CHECK: %[[PTR_M2_V3:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[THREE_64]] +; CHECK: %[[VAL_M2_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V3]] Aligned 16 + %20 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 176), align 16 + %splat.splat.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> zeroinitializer + %splat.splat2.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + %mul3.i.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i.i, %18 + %21 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i.i, <4 x float> nofpclass(nan inf) %17, <4 x float> %mul3.i.i) + %splat.splat5.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2> + %22 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat5.i.i, <4 x float> nofpclass(nan inf) %19, <4 x float> %21) + %splat.splat7.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3> + %23 = tail call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat7.i.i, <4 x float> nofpclass(nan inf) %20, <4 x float> %22) + %24 = load float, ptr addrspace(12) @blend, align 4 +; CHECK: %[[VAL_FLOAT:[0-9]+]] = OpLoad %[[FLOAT]] %[[PTR_FLOAT_VAL]] Aligned 4 +; CHECK: %[[SPLAT_INS:[0-9]+]] = OpCompositeInsert %[[VEC4]] %[[VAL_FLOAT]] {{.*}} 0 +; CHECK: %[[SPLAT:[0-9]+]] = OpVectorShuffle %[[VEC4]] %[[SPLAT_INS]] {{.*}} 0 0 0 0 +; CHECK: %[[RES:[0-9]+]] = OpFMul %[[VEC4]] {{%[0-9]+}} %[[SPLAT]] + %splat.splatinsert.i = insertelement <4 x float> poison, float %24, i64 0 + %splat.splat.i = shufflevector <4 x float> %splat.splatinsert.i, <4 x float> poison, <4 x i32> zeroinitializer + %mul.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %23, %splat.splat.i + %25 = tail call noundef align
16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %mul.i, ptr addrspace(11) %25, align 16 +; CHECK: OpStore {{%[0-9]+}} %[[RES]] Aligned 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +attributes #3 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } +attributes #4 = { mustprogress nofree nosync nounwind willreturn memory(none) } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @transforms, ptr addrspace(12) @blend} \ No newline at end of file diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll index b788f34bf7238..02825e3cbb599 100644 --- a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll +++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll @@ -4,25 +4,40 @@ ; CHECK-LABEL: Begin function original_testcase define fastcc void @original_testcase() { top: + %0 = alloca [1 x ptr], align 4 ; CHECK: OpCompositeInsert - %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + %1 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + store [1 x ptr] %1, ptr %0 ret void } ; CHECK-LABEL: Begin function additional_testcases define fastcc void @additional_testcases() { top: + %0 = alloca [2 x ptr], align 4 + + ; Test with different pointer types ; CHECK: OpCompositeInsert %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0 + ; CHECK: OpStore + store [1 x 
ptr] %1, ptr %0 + ; CHECK-NEXT: OpCompositeInsert %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0 + ; CHECK: OpStore + store {ptr, i32} %2, ptr %0 + ; CHECK-NEXT: OpCompositeInsert %3 = insertvalue {ptr, ptr} undef, ptr null, 0 + ; CHECK: OpStore + store {ptr, ptr} %3, ptr %0 ; Test with undef aggregate ; CHECK-NEXT: OpCompositeInsert %4 = insertvalue [1 x ptr] undef, ptr undef, 0 + ; CHECK: OpStore + store [1 x ptr] %4, ptr %0 ret void } diff --git a/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll b/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll index 6e6cd2f68a971..510c7954c78f8 100644 --- a/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll +++ b/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll @@ -13,13 +13,18 @@ %struct = type { [3 x float] } +@G = global float 0.0 + define spir_kernel void @bar(i1 %sw) { entry: %var1 = alloca %struct + store %struct zeroinitializer, ptr %var1 %var2 = alloca %struct + store %struct zeroinitializer, ptr %var2 %elem1 = getelementptr inbounds [3 x float], ptr %var1, i64 0, i64 0 %elem2 = getelementptr inbounds [3 x float], ptr %var2, i64 0, i64 1 %elem = select i1 %sw, ptr %elem1, ptr %elem2 %res = load float, ptr %elem + store float %res, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll b/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll deleted file mode 100644 index efde6a2c082fc..0000000000000 --- a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll +++ /dev/null @@ -1,23 +0,0 @@ -; This test case ensures that cleaning of temporary constants doesn't purge tracked ones. 
- -; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} - -; CHECK-SPIRV-DAG: %[[#Int:]] = OpTypeInt 8 0 -; CHECK-SPIRV-DAG: %[[#C0:]] = OpConstantNull %[[#Int]] -; CHECK-SPIRV-DAG: %[[#C1:]] = OpConstant %[[#Int]] 1{{$}} - -define spir_kernel void @foo() { -entry: - %addr = alloca i32 - %r1 = call i8 @_Z20__spirv_SpecConstantia(i32 0, i8 1) - ; The name '%conv17.i' is important for the test case, - ; because it includes i32 0 when encoded for SPIR-V usage. - %conv17.i = sext i8 %r1 to i64 - %tobool = trunc i8 %r1 to i1 - %r2 = zext i1 %tobool to i32 - store i32 %r2, ptr %addr - ret void -} - -declare i8 @_Z20__spirv_SpecConstantia(i32, i8) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll index 3d2080e0050b7..691325251f11d 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll @@ -8,14 +8,15 @@ %class.anon = type { i8 } -define spir_func void @_Z3fooi(i32 %x) { +define spir_func i32 @_Z3fooi(i32 %x) { entry: %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 - %0 = load i32, i32* %x.addr, align 4 + %0 = load i32, ptr %x.addr, align 4 %cmp = icmp ne i32 %0, 0 call void @llvm.assume(i1 %cmp) - ret void + %retval = select i1 %cmp, i32 100, i32 10 + ret i32 %retval } declare void @llvm.assume(i1) @@ -45,9 +46,9 @@ entry: call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) store i32 1, i32* %a, align 4 %1 = load i32, i32* %a, align 4 - call spir_func void @_Z3fooi(i32 %1) - %2 = bitcast i32* %a to i8* - call void @llvm.lifetime.end.p0i8(i64 4, i8* %2) + %2 = call spir_func i32 @_Z3fooi(i32 %1) + %3 = bitcast i32* %a to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %3) ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll 
b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll index 438fff6e94f89..18856147896bb 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll @@ -7,20 +7,20 @@ ; CHECK: OpCapability ArbitraryPrecisionIntegersINTEL ; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers" -; CHECK: %[[#I4:]] = OpTypeInt 4 0 -; CHECK: %[[#I2:]] = OpTypeInt 2 0 -; CHECK: %[[#Z4:]] = OpConstantNull %[[#I4]] -; CHECK: %[[#Z2:]] = OpConstantNull %[[#I2]] -; CHECK: %[[#V2I2:]] = OpTypeVector %[[#I2]] 2 -; CHECK: %[[#V2I4:]] = OpTypeVector %[[#I4]] 2 -; CHECK: %[[#V3I2:]] = OpTypeVector %[[#I2]] 3 -; CHECK: %[[#V3I4:]] = OpTypeVector %[[#I4]] 3 -; CHECK: %[[#V4I2:]] = OpTypeVector %[[#I2]] 4 -; CHECK: %[[#V4I4:]] = OpTypeVector %[[#I4]] 4 -; CHECK: %[[#V8I2:]] = OpTypeVector %[[#I2]] 8 -; CHECK: %[[#V8I4:]] = OpTypeVector %[[#I4]] 8 -; CHECK: %[[#V16I2:]] = OpTypeVector %[[#I2]] 16 -; CHECK: %[[#V16I4:]] = OpTypeVector %[[#I4]] 16 +; CHECK-DAG: %[[#I4:]] = OpTypeInt 4 0 +; CHECK-DAG: %[[#I2:]] = OpTypeInt 2 0 +; CHECK-DAG: %[[#Z4:]] = OpConstantNull %[[#I4]] +; CHECK-DAG: %[[#Z2:]] = OpConstantNull %[[#I2]] +; CHECK-DAG: %[[#V2I2:]] = OpTypeVector %[[#I2]] 2 +; CHECK-DAG: %[[#V2I4:]] = OpTypeVector %[[#I4]] 2 +; CHECK-DAG: %[[#V3I2:]] = OpTypeVector %[[#I2]] 3 +; CHECK-DAG: %[[#V3I4:]] = OpTypeVector %[[#I4]] 3 +; CHECK-DAG: %[[#V4I2:]] = OpTypeVector %[[#I2]] 4 +; CHECK-DAG: %[[#V4I4:]] = OpTypeVector %[[#I4]] 4 +; CHECK-DAG: %[[#V8I2:]] = OpTypeVector %[[#I2]] 8 +; CHECK-DAG: %[[#V8I4:]] = OpTypeVector %[[#I4]] 8 +; CHECK-DAG: %[[#V16I2:]] = OpTypeVector %[[#I2]] 16 +; CHECK-DAG: %[[#V16I4:]] = OpTypeVector %[[#I4]] 16 ; CHECK: %[[#]] = OpBitReverse %[[#I2]] %[[#Z2]] @@ -36,45 +36,70 @@ ; CHECK: %[[#]] = OpBitReverse %[[#V16I2]] %[[#]] ; CHECK: %[[#]] = OpBitReverse %[[#V16I4]] %[[#]] +@G_i2_res = global i2 0 +@G_i4_res = global i4 0 +@G_v2i2_res = global <2 x i2> 
zeroinitializer +@G_v2i4_res = global <2 x i4> zeroinitializer +@G_v3i2_res = global <3 x i2> zeroinitializer +@G_v3i4_res = global <3 x i4> zeroinitializer +@G_v4i2_res = global <4 x i2> zeroinitializer +@G_v4i4_res = global <4 x i4> zeroinitializer +@G_v8i2_res = global <8 x i2> zeroinitializer +@G_v8i4_res = global <8 x i4> zeroinitializer +@G_v16i2_res = global <16 x i2> zeroinitializer +@G_v16i4_res = global <16 x i4> zeroinitializer + define spir_kernel void @testBitRev() { entry: %call2 = call i2 @llvm.bitreverse.i2(i2 0) + store i2 %call2, i2* @G_i2_res %call4 = call i4 @llvm.bitreverse.i4(i4 0) + store i4 %call4, i4* @G_i4_res ret void } define spir_kernel void @testBitRevV2(<2 x i2> %a, <2 x i4> %b) { entry: %call2 = call <2 x i2> @llvm.bitreverse.v2i2(<2 x i2> %a) + store <2 x i2> %call2, <2 x i2>* @G_v2i2_res %call4 = call <2 x i4> @llvm.bitreverse.v2i4(<2 x i4> %b) + store <2 x i4> %call4, <2 x i4>* @G_v2i4_res ret void } define spir_kernel void @testBitRevV3(<3 x i2> %a, <3 x i4> %b) { entry: %call2 = call <3 x i2> @llvm.bitreverse.v3i2(<3 x i2> %a) + store <3 x i2> %call2, <3 x i2>* @G_v3i2_res %call4 = call <3 x i4> @llvm.bitreverse.v3i4(<3 x i4> %b) + store <3 x i4> %call4, <3 x i4>* @G_v3i4_res ret void } define spir_kernel void @testBitRevV4(<4 x i2> %a, <4 x i4> %b) { entry: %call2 = call <4 x i2> @llvm.bitreverse.v4i2(<4 x i2> %a) + store <4 x i2> %call2, <4 x i2>* @G_v4i2_res %call4 = call <4 x i4> @llvm.bitreverse.v4i4(<4 x i4> %b) + store <4 x i4> %call4, <4 x i4>* @G_v4i4_res ret void } define spir_kernel void @testBitRevV8(<8 x i2> %a, <8 x i4> %b) { entry: %call2 = call <8 x i2> @llvm.bitreverse.v8i2(<8 x i2> %a) + store <8 x i2> %call2, <8 x i2>* @G_v8i2_res %call4 = call <8 x i4> @llvm.bitreverse.v8i4(<8 x i4> %b) + store <8 x i4> %call4, <8 x i4>* @G_v8i4_res ret void } define spir_kernel void @testBitRevV16(<16 x i2> %a, <16 x i4> %b) { entry: %call2 = call <16 x i2> @llvm.bitreverse.v16i2(<16 x i2> %a) + store <16 x i2> %call2, <16 x 
i2>* @G_v16i2_res %call4 = call <16 x i4> @llvm.bitreverse.v16i4(<16 x i4> %b) + store <16 x i4> %call4, <16 x i4>* @G_v16i4_res ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll index 11bedfa605f9b..8e8e4df8fabc6 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll @@ -23,15 +23,28 @@ ; CHECK: OpExtInst %[[#]] %[[#]] fma %[[#]] %[[#]] %[[#]] ; CHECK: OpFRem +@G_r1 = global float 0.0 +@G_r2 = global float 0.0 +@G_r3 = global float 0.0 +@G_r4 = global float 0.0 +@G_r5 = global float 0.0 +@G_r6 = global float 0.0 + ; Function Attrs: norecurse nounwind strictfp define dso_local spir_kernel void @test(float %a, i32 %in, i32 %ui) { entry: %r1 = tail call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") + store float %r1, ptr @G_r1 %r2 = tail call float @llvm.experimental.constrained.fdiv.f32(float %a, float %a, metadata !"round.towardzero", metadata !"fpexcept.strict") + store float %r2, ptr @G_r2 %r3 = tail call float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.upward", metadata !"fpexcept.strict") + store float %r3, ptr @G_r3 %r4 = tail call float @llvm.experimental.constrained.fmul.f32(float %a, float %a, metadata !"round.downward", metadata !"fpexcept.strict") + store float %r4, ptr @G_r4 %r5 = tail call float @llvm.experimental.constrained.fma.f32(float %a, float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + store float %r5, ptr @G_r5 %r6 = tail call float @llvm.experimental.constrained.frem.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + store float %r6, ptr @G_r6 ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll index 
f83cd8ad1969c..375da5b32e232 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll @@ -18,19 +18,20 @@ ; CL: %[[#FooVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] ; CL-NEXT: OpLifetimeStart %[[#Casted1]] 16 -; CL-NEXT: OpBitcast -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] +; CL: OpInBoundsPtrAccessChain +; CL: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] ; CL-NEXT: OpLifetimeStop %[[#Casted2]] 16 ; VK: OpFunction ; VK: %[[#FooVar:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 + store i64 zeroinitializer, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } @@ -39,37 +40,40 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; CL: %[[#BarVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] ; CL-NEXT: OpLifetimeStart %[[#Casted1]] 16 -; CL-NEXT: OpBitcast -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL: OpInBoundsPtrAccessChain +; CL: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] ; CL-NEXT: OpLifetimeStop %[[#Casted2]] 16 ; VK: OpFunction ; VK: %[[#BarVar:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 + store 
i64 zeroinitializer, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } ; CL: OpFunction ; CL: %[[#TestVar:]] = OpVariable -; CL-NEXT: OpLifetimeStart %[[#TestVar]] 1 -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: OpLifetimeStop %[[#TestVar]] 1 +; CL: OpLifetimeStart %[[#TestVar]] 1 +; CL: OpInBoundsPtrAccessChain +; CL: OpLifetimeStop %[[#TestVar]] 1 ; VK: OpFunction ; VK: %[[#Test:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @test(ptr noundef align 8 %_arg) { %var = alloca i8, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %var) %KernelFunc = getelementptr inbounds i8, ptr %var, i64 1 + store i8 0, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %var) ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll new file mode 100644 index 0000000000000..63eddd20bfc22 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll @@ -0,0 +1,32 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpName %[[dst_var:[0-9]+]] "dst" +; CHECK: OpName %[[src_var:[0-9]+]] "src" + +; CHECK: %[[f32:[0-9]+]] = OpTypeFloat 32 +; CHECK: %[[structS:[0-9]+]] = OpTypeStruct %[[f32]] %[[f32]] %[[f32]] %[[f32]] %[[f32]] +; CHECK: %[[ptr_crosswkgrp_structS:[0-9]+]] = OpTypePointer CrossWorkgroup %[[structS]] +%struct.S = type <{ float, float, float, float, float }> + +; CHECK-DAG: %[[src_var]] = OpVariable %[[ptr_crosswkgrp_structS]] CrossWorkgroup +@src = external dso_local addrspace(1) global %struct.S, align 4 + +; CHECK-DAG: %[[dst_var]] = OpVariable %[[ptr_crosswkgrp_structS]] CrossWorkgroup +@dst = external dso_local addrspace(1) global %struct.S, align 4 + +; CHECK: 
%[[main_func:[0-9]+]] = OpFunction %{{[0-9]+}} None %{{[0-9]+}} +; CHECK: %[[entry:[0-9]+]] = OpLabel +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none, target_mem0: none, target_mem1: none) +define void @main() local_unnamed_addr #0 { +entry: +; CHECK: OpCopyMemory %[[dst_var]] %[[src_var]] Aligned 4 + call void @llvm.memcpy.p0.p0.i64(ptr addrspace(1) align 4 @dst, ptr addrspace(1) align 4 @src, i64 20, i1 false) + ret void +; CHECK: OpReturn +; CHECK: OpFunctionEnd +} + +attributes #0 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } + + diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll index 08f15c077fed9..db930d1b28ec3 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll @@ -9,29 +9,55 @@ ; CHECK-DAG: OpName %[[#Bar:]] "bar" ; CHECK: %[[#Foo]] = OpFunction ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat ; CHECK: %[[#Bar]] = OpFunction ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat + +@G_r1_foo = global i16 0 +@G_r2_foo = global i16 0 +@G_r3_foo = global i16 0 +@G_r4_foo = global i16 0 +@G_r1_bar = global <4 x i32> zeroinitializer +@G_r2_bar = global <4 x i32> 
zeroinitializer +@G_r3_bar = global <4 x i32> zeroinitializer +@G_r4_bar = global <4 x i32> zeroinitializer define spir_func void @foo(i16 %x, i16 %y) { entry: %r1 = tail call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y) + store i16 %r1, ptr @G_r1_foo %r2 = tail call i16 @llvm.usub.sat.i16(i16 %x, i16 %y) + store i16 %r2, ptr @G_r2_foo %r3 = tail call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y) + store i16 %r3, ptr @G_r3_foo %r4 = tail call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y) + store i16 %r4, ptr @G_r4_foo ret void } define spir_func void @bar(<4 x i32> %x, <4 x i32> %y) { entry: %r1 = tail call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r1, ptr @G_r1_bar %r2 = tail call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r2, ptr @G_r2_bar %r3 = tail call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r3, ptr @G_r3_bar %r4 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r4, ptr @G_r4_bar ret void } + +declare i16 @llvm.uadd.sat.i16(i16, i16) +declare i16 @llvm.usub.sat.i16(i16, i16) +declare i16 @llvm.sadd.sat.i16(i16, i16) +declare i16 @llvm.ssub.sat.i16(i16, i16) +declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll index 08e429f36827c..54cb096da8d89 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll @@ -90,12 +90,13 @@ define dso_local spir_func void @umulo_v2i64(<2 x i64> %a, <2 x i64> %b, ptr %p) ; CHECK: OpIAddCarry %[[StructLong]] ; CHECK: OpIAddCarry %[[StructLong]] ; CHECK: OpReturn -define void @foo(i64 %a, i64 %b) 
{ +define i64 @foo(i64 %a, i64 %b) { %r1 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %r2 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %d1 = extractvalue { i64, i1 } %r1, 0 %d2 = extractvalue { i64, i1 } %r2, 0 - ret void + %sum = add i64 %d1, %d2 + ret i64 %sum } declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8) diff --git a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll index d56678ecfc2c9..e96ebf777c28f 100644 --- a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll +++ b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll @@ -2,6 +2,7 @@ ; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0 ; CHECK-DAG: [[uint2:%[0-9]+]] = OpTypeVector [[uint]] 2 +; CHECK-DAG: [[uint_0:%[0-9]+]] = OpConstant [[uint]] 0 ; CHECK-DAG: [[uint_1:%[0-9]+]] = OpConstant [[uint]] 1 ; CHECK-DAG: [[ptr_uint:%[0-9]+]] = OpTypePointer Function [[uint]] ; CHECK-DAG: [[ptr_uint2:%[0-9]+]] = OpTypePointer Function [[uint2]] @@ -12,7 +13,9 @@ entry: ; CHECK: [[var:%[0-9]+]] = OpVariable [[ptr_uint2]] Function %1 = getelementptr <2 x i32>, ptr %0, i32 0, i32 1 -; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[var]] [[uint_1]] +; CHECK: [[gep:%[0-9]+]] = OpAccessChain [[ptr_uint]] [[var]] [[uint_1]] + store i32 0, ptr %1 +; CHECK: OpStore [[gep]] [[uint_0]] ret void } diff --git a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll index 66337b1ba2b37..518e011bf0be2 100644 --- a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll +++ b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll @@ -1,5 +1,4 @@ -; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -print-after-all | FileCheck %s -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %} +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - | FileCheck %s ; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0 @@ -24,35 +23,85 @@ ; CHECK-DAG: 
[[ptr_A:%[0-9]+]] = OpTypePointer Function [[A]] ; CHECK-DAG: [[ptr_B:%[0-9]+]] = OpTypePointer Function [[B]] -define void @main() #1 { -entry: - %0 = alloca %B, align 4 -; CHECK: [[tmp:%[0-9]+]] = OpVariable [[ptr_B]] Function - - %1 = getelementptr %B, ptr %0, i32 0, i32 0 +define internal ptr @gep_B_0(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_A]] [[tmp]] [[uint_0]] - %2 = getelementptr inbounds %B, ptr %0, i32 0, i32 0 + %res = getelementptr %B, ptr %base, i32 0, i32 0 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_0(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_A]] [[tmp]] [[uint_0]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 0 + ret ptr %res +} - %3 = getelementptr %B, ptr %0, i32 0, i32 1 +define internal ptr @gep_B_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[tmp]] [[uint_1]] - %4 = getelementptr inbounds %B, ptr %0, i32 0, i32 1 + %res = getelementptr %B, ptr %base, i32 0, i32 1 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_uint]] [[tmp]] [[uint_1]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 1 + ret ptr %res +} - %5 = getelementptr %B, ptr %0, i32 0, i32 2 +define internal ptr @gep_B_2(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_A]] [[tmp]] [[uint_2]] - %6 = getelementptr inbounds %B, ptr %0, i32 0, i32 2 + %res = getelementptr %B, ptr %base, i32 0, i32 2 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_2(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_A]] [[tmp]] [[uint_2]] + %res = getelementptr inbounds %B, 
ptr %base, i32 0, i32 2 + ret ptr %res +} - %7 = getelementptr %B, ptr %0, i32 0, i32 2, i32 1 +define internal ptr @gep_B_2_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[tmp]] [[uint_2]] [[uint_1]] - %8 = getelementptr inbounds %B, ptr %0, i32 0, i32 2, i32 1 + %res = getelementptr %B, ptr %base, i32 0, i32 2, i32 1 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_2_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_uint]] [[tmp]] [[uint_2]] [[uint_1]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 2, i32 1 + ret ptr %res +} - %9 = getelementptr %B, ptr %0, i32 0, i32 2 - %10 = getelementptr %A, ptr %9, i32 0, i32 1 +define internal ptr @gep_B_2_A_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: [[x:%[0-9]+]] = OpAccessChain [[ptr_A]] [[tmp]] [[uint_2]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[x]] [[uint_1]] + %x = getelementptr %B, ptr %base, i32 0, i32 2 + %res = getelementptr %A, ptr %x, i32 0, i32 1 + ret ptr %res +} + +define void @main() #1 { +entry: + %0 = alloca %B, align 4 +; CHECK: [[tmp:%[0-9]+]] = OpVariable [[ptr_B]] Function + + %1 = call ptr @gep_B_0(ptr %0) + %2 = call ptr @gep_inbounds_B_0(ptr %0) + %3 = call ptr @gep_B_1(ptr %0) + %4 = call ptr @gep_inbounds_B_1(ptr %0) + %5 = call ptr @gep_B_2(ptr %0) + %6 = call ptr @gep_inbounds_B_2(ptr %0) + %7 = call ptr @gep_B_2_1(ptr %0) + %8 = call ptr @gep_inbounds_B_2_1(ptr %0) + %10 = call ptr @gep_B_2_A_1(ptr %0) ret void } diff --git a/llvm/test/CodeGen/SPIRV/phi-insert-point.ll b/llvm/test/CodeGen/SPIRV/phi-insert-point.ll index 70d121cdf4b3a..a34186d491257 100644 --- a/llvm/test/CodeGen/SPIRV/phi-insert-point.ll +++ b/llvm/test/CodeGen/SPIRV/phi-insert-point.ll @@ -36,9 +36,18 @@ ok: br label %exit exit: + store i64 %r1, ptr @g1 + store i64 %r2, ptr @g2 + store ptr addrspace(4) %r3, ptr 
@g3 + store ptr addrspace(4) %r4, ptr @g4 ret void } +@g1 = internal global i64 0 +@g2 = internal global i64 0 +@g3 = internal global ptr addrspace(4) null +@g4 = internal global ptr addrspace(4) null + define spir_kernel void @bar(i64 %arg_val, i64 %arg_val_def, ptr addrspace(4) byval(%struct) %arg_ptr, ptr addrspace(4) %arg_ptr_def) { entry: %fl = icmp eq i64 %arg_val, 0 @@ -55,5 +64,9 @@ ok: br label %exit exit: + store i64 %r1, ptr @g1 + store i64 %r2, ptr @g2 + store ptr addrspace(4) %r3, ptr @g3 + store ptr addrspace(4) %r4, ptr @g4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll index bc090ce55fbec..c250ebae12746 100644 --- a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll +++ b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll @@ -20,11 +20,14 @@ ; CHECK: %[[#Case1]] = OpFunction define spir_func void @case1(i1 %b1, i1 %b2, i1 %b3) { entry: + %tmp.1 = alloca i8, align 1 ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] br i1 %b1, label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ @.str.1, %entry ], [ @.str.2, %l2 ], [ @.str.2, %l3 ] + %v1 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v1, ptr %tmp.1, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel @@ -51,11 +54,14 @@ exit: ; CHECK: %[[#Case2]] = OpFunction define spir_func void @case2(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %str1, ptr addrspace(1) byval(%struct2) %str2) { entry: + %tmp.2 = alloca i8, align 1 ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] br i1 %b1, label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ %str1, %entry ], [ %str2, %l2 ], [ %str2, %l3 ] + %v2 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v2, ptr %tmp.2, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel @@ -83,10 +89,13 @@ define spir_func void @case3(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%str ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] entry: + %tmp.3 = alloca i8, align 1 br i1 %b1, 
label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ %_arg_str1, %entry ], [ %str2, %l2 ], [ %str3, %l3 ] + %v3 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v3, ptr %tmp.3, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll index 7db1eed84bf7d..3382987bbd581 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll @@ -26,9 +26,13 @@ %struct.S = type { i32 } %struct.__wrapper_class = type { [7 x %struct.S] } +@G_elem = global ptr null +@G_data = global i64 0 + define spir_kernel void @foo1(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) { entry: %elem = getelementptr inbounds i8, ptr %_arg_Arr, i64 0 + store ptr %elem, ptr @G_elem ret void } @@ -36,5 +40,6 @@ define spir_kernel void @foo2(ptr noundef byval(%struct.__wrapper_class) align 4 entry: %elem = getelementptr inbounds %struct.__wrapper_class, ptr %_arg_Arr, i64 0 %data = load i64, ptr %elem + store i64 %data, ptr @G_data ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll index d6a0071167cef..ed5652a750582 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll @@ -14,8 +14,11 @@ %struct.S = type { i32 } %struct.__wrapper_class = type { [7 x %struct.S] } +@G = global i32 0 + define spir_kernel void @foo(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) { entry: %val = load i32, ptr %_arg_Arr + store i32 %val, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll b/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll index 0e2730e18bf38..e47aa61a8acd7 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll @@ -30,6 +30,8 @@ 
%"class.std::complex" = type { { double, double } } %class.anon = type { i32, ptr addrspace(4), [2 x [2 x %"class.std::complex"]] } +@G = global ptr addrspace(4) null + define weak_odr dso_local spir_kernel void @foo(i32 noundef %_arg_N, ptr addrspace(1) noundef align 8 %_arg_p) { entry: %Kernel = alloca %class.anon, align 8 @@ -38,5 +40,6 @@ entry: %r0 = addrspacecast ptr addrspace(1) %_arg_p to ptr addrspace(4) store ptr addrspace(4) %r0, ptr %p, align 8 %r3 = load ptr addrspace(4), ptr %p, align 8 + store ptr addrspace(4) %r3, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll index 7a09ac973b590..0e397ec51caaa 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll @@ -7,9 +7,14 @@ ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTR1]] %[[#]] %[[#]] ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTR2]] %[[#]] %[[#]] +@G_c = global ptr addrspace(1) null +@G_d = global ptr addrspace(2) null + define spir_kernel void @foo(ptr addrspace(1) %a, ptr addrspace(2) %b) { entry: %c = getelementptr inbounds i8, ptr addrspace(1) %a, i32 1 + store ptr addrspace(1) %c, ptr @G_c %d = getelementptr inbounds i8, ptr addrspace(2) %b, i32 2 + store ptr addrspace(2) %d, ptr @G_d ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll index c822dbc5d6c0e..e12a809125248 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll @@ -7,9 +7,12 @@ ; CHECK: %[[#GEP:]] = OpInBoundsPtrAccessChain %[[#PTR]] %[[#ARG]] %[[#]] ; CHECK: %[[#]] = OpLoad %[[#FLOAT32]] %[[#GEP]] Aligned 4 +@G = global float 0.0 + define spir_kernel void @test1(ptr addrspace(1) %arg1) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type 
!3 !kernel_arg_type_qual !4 { %a = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 1 %b = load float, ptr addrspace(1) %a, align 4 + store float %b, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll index 1d846a35a65aa..859253e5b18d9 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll @@ -7,6 +7,9 @@ ; CHECK-DAG: %[[#PTR_VEC3:]] = OpTypePointer CrossWorkgroup %[[#VEC3]] ; CHECK-DAG: %[[#PTR_VEC4:]] = OpTypePointer CrossWorkgroup %[[#VEC4]] +@G_loadv1 = global <4 x i8> zeroinitializer +@G_loadv2 = global <4 x i8> zeroinitializer + ; CHECK: %[[#AC1:]] = OpInBoundsPtrAccessChain %[[#PTR_VEC3]] %[[#]] %[[#]] ; CHECK: %[[#BC1:]] = OpBitcast %[[#PTR_VEC4]] %[[#AC1]] ; CHECK: %[[#LD1:]] = OpLoad %[[#VEC4]] %[[#BC1]] Aligned 4 @@ -15,6 +18,7 @@ define spir_kernel void @foo(ptr addrspace(1) %a, i64 %b) { %index = getelementptr inbounds <3 x i8>, ptr addrspace(1) %a, i64 %b %loadv = load <4 x i8>, ptr addrspace(1) %index, align 4 + store <4 x i8> %loadv, ptr @G_loadv1 ret void } @@ -29,5 +33,6 @@ define spir_kernel void @bar(ptr addrspace(1) %a, i64 %b) { ; from older LLVM IR with typed pointers. 
%cast = bitcast ptr addrspace(1) %index to ptr addrspace(1) %loadv = load <4 x i8>, ptr addrspace(1) %cast, align 4 + store <4 x i8> %loadv, ptr @G_loadv2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll index a5e891dae6f11..3ae03edf5200f 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll @@ -7,11 +7,15 @@ ; CHECK-DAG: %[[#PTRINT8:]] = OpTypePointer Workgroup %[[#INT8]] ; CHECK-DAG: %[[#CONST:]] = OpConstant %[[#INT64]] 1 +@G_gep1 = global ptr addrspace(3) null +@G_gep2 = global ptr addrspace(3) null + ; CHECK: %[[#PARAM1:]] = OpFunctionParameter %[[#PTRINT8]] define spir_kernel void @test1(ptr addrspace(3) %address) { ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTRINT8]] %[[#PARAM1]] %[[#CONST]] %cast = bitcast ptr addrspace(3) %address to ptr addrspace(3) %gep = getelementptr inbounds i8, ptr addrspace(3) %cast, i64 1 + store ptr addrspace(3) %gep, ptr @G_gep1 ret void } @@ -19,5 +23,6 @@ define spir_kernel void @test1(ptr addrspace(3) %address) { define spir_kernel void @test2(ptr addrspace(3) %address) { ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTRINT8]] %[[#PARAM2]] %[[#CONST]] %gep = getelementptr inbounds i8, ptr addrspace(3) %address, i64 1 + store ptr addrspace(3) %gep, ptr @G_gep2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll index 19451d23c6830..39563aecafec4 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll @@ -7,13 +7,16 @@ ; CHECK-DAG: %[[#value:]] = OpConstant %[[#type]] 456 ; CHECK-DAG: %[[#var:]] = OpVariable %[[#ptrty]] Private %[[#value]] +@G = internal global i32 0 + define hidden spir_func void @Foo() { %p = addrspacecast ptr addrspace(10) @PrivInternal to 
ptr %v = load i32, ptr %p, align 4 + store i32 %v, ptr @G ret void ; CHECK: OpLabel -; CHECK-NEXT: OpLoad %[[#type]] %[[#var]] Aligned 4 -; CHECK-Next: OpReturn +; CHECK: OpLoad %[[#type]] %[[#var]] Aligned 4 +; CHECK: OpReturn } define void @main() #1 { diff --git a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll index b3c68d22f9bdd..681fb70ad706d 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll @@ -9,9 +9,14 @@ ; CHECK: %[[#]] = OpLoad %[[#INT8]] %[[#FNP1]] Aligned 1 ; CHECK: %[[#]] = OpLoad %[[#INT8]] %[[#FNP2]] Aligned 1 +@G_c = global i8 0 +@G_d = global i8 0 + define spir_kernel void @foo(ptr addrspace(1) %a, ptr addrspace(2) %b) { entry: %c = load i8, ptr addrspace(1) %a + store i8 %c, ptr @G_c %d = load i8, ptr addrspace(2) %b + store i8 %d, ptr @G_d ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll b/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll index a9e79df259c4f..44134f83cfec3 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll @@ -51,6 +51,7 @@ l1: l2: %val2 = phi ptr addrspace(4) [ %p, %l1 ], [ %val3, %l3 ] %val1 = phi ptr addrspace(4) [ addrspacecast (ptr addrspace(3) @G1 to ptr addrspace(4)), %l1 ], [ %val2, %l3 ] + store i16 0, ptr addrspace(4) %val1, align 2 br i1 %f2, label %l3, label %exit l3: @@ -75,6 +76,7 @@ l1: l2: %val1 = phi ptr addrspace(4) [ addrspacecast (ptr addrspace(3) @G1 to ptr addrspace(4)), %l1 ], [ %val2, %l3 ] %val2 = phi ptr addrspace(4) [ %p, %l1 ], [ %val3, %l3 ] + store i16 0, ptr addrspace(4) %val1, align 2 br i1 %f2, label %l3, label %exit exit: diff --git a/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll b/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll index 4d5549dfab8d9..123daa411810b 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll +++ 
b/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll @@ -10,6 +10,7 @@ ; CHECK-DAG: OpName %[[#func_chain:]] "chain" @global = internal addrspace(10) global i32 zeroinitializer +@G = global i32 0 define void @simple() { ; CHECK: %[[#func_simple]] = OpFunction @@ -17,6 +18,7 @@ entry: %ptr = getelementptr i32, ptr addrspace(10) @global, i32 0 %casted = addrspacecast ptr addrspace(10) %ptr to ptr %val = load i32, ptr %casted + store i32 %val, ptr @G ; CHECK: %{{.*}} = OpLoad %[[#uint]] %[[#var]] Aligned 4 ret void } @@ -31,6 +33,7 @@ entry: %e = addrspacecast ptr addrspace(10) %d to ptr %val = load i32, ptr %e + store i32 %val, ptr @G ; CHECK: %{{.*}} = OpLoad %[[#uint]] %[[#var]] Aligned 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll b/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll index 876cd3c20cf35..80ee36cfe15d2 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll @@ -15,6 +15,9 @@ ; CHECK: OpGenericCastToPtr ; CHECK: OpPtrEqual +@G_b1 = global i1 0 +@G_b2 = global i1 0 + define spir_kernel void @foo(ptr addrspace(3) align 4 %_arg_local, ptr addrspace(1) align 4 %_arg_global) { entry: %p1 = getelementptr inbounds i32, ptr addrspace(1) %_arg_global, i64 0 @@ -24,9 +27,12 @@ entry: %p4 = addrspacecast ptr addrspace(1) %p3 to ptr addrspace(4) %p5 = tail call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %p4, i32 4) %b1 = icmp eq ptr addrspace(3) %p5, null + store i1 %b1, ptr @G_b1 %p6 = getelementptr inbounds i32, ptr addrspace(3) %p5, i64 0 %p7 = tail call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %p4, i32 4) %b2 = icmp eq ptr addrspace(3) %p7, null + store i1 %b2, ptr @G_b2 + store ptr addrspace(3) %p6, ptr addrspace(3) %p2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll 
b/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll index 7548f4757dbe6..6fc03a386d14d 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll @@ -4,18 +4,23 @@ @.str = private unnamed_addr constant [7 x i8] c"buffer\00", align 1 +; The i64 values in the extracts will be turned +; into immediate values. There should be no 64-bit +; integers in the module. +; CHECK-NOT: OpTypeInt 64 0 + define void @main() "hlsl.shader"="pixel" { -; CHECK: %24 = OpFunction %2 None %3 ; -- Begin function main -; CHECK-NEXT: %1 = OpLabel -; CHECK-NEXT: %25 = OpVariable %13 Function %22 -; CHECK-NEXT: %26 = OpLoad %7 %23 -; CHECK-NEXT: %27 = OpImageRead %5 %26 %15 -; CHECK-NEXT: %28 = OpCompositeExtract %4 %27 0 -; CHECK-NEXT: %29 = OpCompositeExtract %4 %27 1 -; CHECK-NEXT: %30 = OpFAdd %4 %29 %28 -; CHECK-NEXT: %31 = OpCompositeInsert %5 %30 %27 0 -; CHECK-NEXT: %32 = OpLoad %7 %23 -; CHECK-NEXT: OpImageWrite %32 %15 %31 +; CHECK: %[[FUNC:[0-9]+]] = OpFunction %[[VOID:[0-9]+]] None %[[FNTYPE:[0-9]+]] ; -- Begin function main +; CHECK-NEXT: %[[LABEL:[0-9]+]] = OpLabel +; CHECK-NEXT: %[[VAR:[0-9]+]] = OpVariable %[[PTR_FN:[a-zA-Z0-9_]+]] Function %[[INIT:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[LOAD1:[0-9]+]] = OpLoad %[[IMG_TYPE:[a-zA-Z0-9_]+]] %[[IMG_VAR:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[READ:[0-9]+]] = OpImageRead %[[VEC4:[a-zA-Z0-9_]+]] %[[LOAD1]] %[[COORD:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[EXTRACT1:[0-9]+]] = OpCompositeExtract %[[FLOAT:[a-zA-Z0-9_]+]] %[[READ]] 0 +; CHECK-NEXT: %[[EXTRACT2:[0-9]+]] = OpCompositeExtract %[[FLOAT]] %[[READ]] 1 +; CHECK-NEXT: %[[ADD:[0-9]+]] = OpFAdd %[[FLOAT]] %[[EXTRACT2]] %[[EXTRACT1]] +; CHECK-NEXT: %[[INSERT:[0-9]+]] = OpCompositeInsert %[[VEC4]] %[[ADD]] %[[READ]] 0 +; CHECK-NEXT: %[[LOAD2:[0-9]+]] = OpLoad %[[IMG_TYPE]] %[[IMG_VAR]] +; CHECK-NEXT: OpImageWrite %[[LOAD2]] %[[COORD]] %[[INSERT]] ; CHECK-NEXT: OpReturn ; CHECK-NEXT: OpFunctionEnd entry:
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll index 101116f437811..7409b3db51948 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll @@ -34,6 +34,8 @@ %class.CustomType = type { i64 } +@G = global ptr addrspace(4) null + define linkonce_odr dso_local spir_func void @bar(ptr addrspace(4) noundef %first) { entry: %first.addr = alloca ptr addrspace(4) @@ -44,6 +46,7 @@ entry: call spir_func void @foo(i64 noundef 100, ptr addrspace(4) noundef dereferenceable(8) %first.addr.ascast, ptr addrspace(4) noundef dereferenceable(8) %temp.ascast) call spir_func void @foo(i64 noundef 100, ptr addrspace(4) noundef dereferenceable(8) %temp.ascast, ptr addrspace(4) noundef dereferenceable(8) %first.addr.ascast) %var = alloca ptr addrspace(4), align 8 + store ptr addrspace(4) null, ptr %var ret void } diff --git a/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll b/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll new file mode 100644 index 0000000000000..6bd640f813142 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll @@ -0,0 +1,31 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %} + +%A = type { + i32, + i32 +} + +%B = type { + %A, + i32, + %A +} + +; Make sure all struct types are removed. +; CHECK-NOT: OpTypeStruct + +; Make sure the GEPs and the function scope variable are removed. 
+; CHECK: OpFunction +; CHECK-NEXT: OpLabel +; CHECK-NEXT: OpReturn +; CHECK-NEXT: OpFunctionEnd +define void @main() #1 { +entry: + %0 = alloca %B, align 4 + %1 = getelementptr %B, ptr %0, i32 0, i32 2 + %2 = getelementptr %A, ptr %1, i32 0, i32 1 + ret void +} + +attributes #1 = { "hlsl.numthreads"="4,8,16" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll index 481bad9a26b7b..280f586891717 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll @@ -19,10 +19,15 @@ ; TODO: Add a check to ensure that there's no behavior change of bitreverse operation ; between the LLVM-IR and SPIR-V for i2 and i4 +@G_res2 = global i2 0 +@G_res4 = global i4 0 + define spir_func void @foo(i2 %a, i4 %b) { entry: %res2 = tail call i2 @llvm.bitreverse.i2(i2 %a) + store i2 %res2, ptr @G_res2 %res4 = tail call i4 @llvm.bitreverse.i4(i4 %b) + store i4 %res4, ptr @G_res4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll index 119dbe14446c1..68f33510b6a8d 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll @@ -45,6 +45,12 @@ entry: %GE = call spir_func ptr addrspace(1) @_Z41__spirv_GenericCastToPtrExplicit_ToGlobalPvi(ptr addrspace(4) %var1, i32 5) %LE = call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %var2, i32 4) %PE = call spir_func ptr @_Z42__spirv_GenericCastToPtrExplicit_ToPrivatePvi(ptr addrspace(4) %var3, i32 7) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 + store i32 0, ptr addrspace(1) %GE, align 4 + store i8 0, ptr addrspace(3) %LE, align 1 + store i32 0, ptr %PE, align 4 ret void } @@ -70,6 +76,9 
@@ entry: %G = call spir_func ptr addrspace(1) @_Z9to_globalPv(ptr addrspace(4) %var1) %L = call spir_func ptr addrspace(3) @_Z8to_localPv(ptr addrspace(4) %var2) %P = call spir_func ptr @_Z10to_privatePv(ptr addrspace(4) %var3) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 ret void } @@ -114,6 +123,12 @@ entry: %GE = call spir_func ptr addrspace(1) @__spirv_GenericCastToPtrExplicit_ToGlobal(ptr addrspace(4) %var1, i32 5) %LE = call spir_func ptr addrspace(3) @__spirv_GenericCastToPtrExplicit_ToLocal(ptr addrspace(4) %var2, i32 4) %PE = call spir_func ptr @__spirv_GenericCastToPtrExplicit_ToPrivate(ptr addrspace(4) %var3, i32 7) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 + store i32 0, ptr addrspace(1) %GE, align 4 + store i8 0, ptr addrspace(3) %LE, align 1 + store i32 0, ptr %PE, align 4 ret void } @@ -139,6 +154,9 @@ entry: %G = call spir_func ptr addrspace(1) @to_global(ptr addrspace(4) %var1) %L = call spir_func ptr addrspace(3) @to_local(ptr addrspace(4) %var2) %P = call spir_func ptr @to_private(ptr addrspace(4) %var3) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll index 818243ab19e41..9f08a65c16866 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll @@ -16,9 +16,13 @@ ; CHECK-SPIRV: OpGenericCastToPtr %[[#LocalCharPtr]] %[[#Ptr2]] ; CHECK-SPIRV: OpFunctionEnd +@G_p = global ptr addrspace(3) null +@G_p2 = global ptr addrspace(3) null + define spir_kernel void @foo(ptr addrspace(1) %arg) { entry: %p = addrspacecast ptr addrspace(1) %arg to ptr addrspace(3) + store ptr addrspace(3) %p, ptr @G_p ret void } @@ -26,5 +30,6 @@ 
define spir_kernel void @bar(ptr addrspace(1) %arg) { entry: %p1 = addrspacecast ptr addrspace(1) %arg to ptr addrspace(4) %p2 = addrspacecast ptr addrspace(4) %p1 to ptr addrspace(3) + store ptr addrspace(3) %p2, ptr @G_p2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll index 46eaba9d5ceb1..c752e278927a9 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll @@ -184,6 +184,8 @@ ; CHECK-SPIRV: %[[#r89]] = OpUnordered %[[#bool]] ; CHECK-SPIRV: %[[#r90]] = OpUnordered %[[#bool]] +@G = global [90 x i1] zeroinitializer + define spir_kernel void @testFCmp(float %a, float %b) local_unnamed_addr { entry: %r1 = fcmp oeq float %a, %b @@ -276,5 +278,185 @@ entry: %r88 = fcmp uno float %a, %b %r89 = fcmp ninf uno float %a, %b %r90 = fcmp nsz uno float %a, %b + %p1 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 0 + store i1 %r1, ptr %p1 + %p2 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 1 + store i1 %r2, ptr %p2 + %p3 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 2 + store i1 %r3, ptr %p3 + %p4 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 3 + store i1 %r4, ptr %p4 + %p5 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 4 + store i1 %r5, ptr %p5 + %p6 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 5 + store i1 %r6, ptr %p6 + %p7 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 6 + store i1 %r7, ptr %p7 + %p8 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 7 + store i1 %r8, ptr %p8 + %p9 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 8 + store i1 %r9, ptr %p9 + %p10 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 9 + store i1 %r10, ptr %p10 + %p11 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 10 + store i1 %r11, ptr %p11 + %p12 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 11 + store i1 %r12, ptr %p12 + %p13 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 12 + 
store i1 %r13, ptr %p13 + %p14 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 13 + store i1 %r14, ptr %p14 + %p15 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 14 + store i1 %r15, ptr %p15 + %p16 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 15 + store i1 %r16, ptr %p16 + %p17 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 16 + store i1 %r17, ptr %p17 + %p18 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 17 + store i1 %r18, ptr %p18 + %p19 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 18 + store i1 %r19, ptr %p19 + %p20 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 19 + store i1 %r20, ptr %p20 + %p21 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 20 + store i1 %r21, ptr %p21 + %p22 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 21 + store i1 %r22, ptr %p22 + %p23 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 22 + store i1 %r23, ptr %p23 + %p24 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 23 + store i1 %r24, ptr %p24 + %p25 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 24 + store i1 %r25, ptr %p25 + %p26 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 25 + store i1 %r26, ptr %p26 + %p27 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 26 + store i1 %r27, ptr %p27 + %p28 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 27 + store i1 %r28, ptr %p28 + %p29 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 28 + store i1 %r29, ptr %p29 + %p30 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 29 + store i1 %r30, ptr %p30 + %p31 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 30 + store i1 %r31, ptr %p31 + %p32 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 31 + store i1 %r32, ptr %p32 + %p33 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 32 + store i1 %r33, ptr %p33 + %p34 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 33 + store i1 %r34, ptr %p34 + %p35 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 34 
+ store i1 %r35, ptr %p35 + %p36 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 35 + store i1 %r36, ptr %p36 + %p37 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 36 + store i1 %r37, ptr %p37 + %p38 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 37 + store i1 %r38, ptr %p38 + %p39 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 38 + store i1 %r39, ptr %p39 + %p40 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 39 + store i1 %r40, ptr %p40 + %p41 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 40 + store i1 %r41, ptr %p41 + %p42 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 41 + store i1 %r42, ptr %p42 + %p43 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 42 + store i1 %r43, ptr %p43 + %p44 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 43 + store i1 %r44, ptr %p44 + %p45 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 44 + store i1 %r45, ptr %p45 + %p46 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 45 + store i1 %r46, ptr %p46 + %p47 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 46 + store i1 %r47, ptr %p47 + %p48 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 47 + store i1 %r48, ptr %p48 + %p49 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 48 + store i1 %r49, ptr %p49 + %p50 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 49 + store i1 %r50, ptr %p50 + %p51 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 50 + store i1 %r51, ptr %p51 + %p52 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 51 + store i1 %r52, ptr %p52 + %p53 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 52 + store i1 %r53, ptr %p53 + %p54 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 53 + store i1 %r54, ptr %p54 + %p55 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 54 + store i1 %r55, ptr %p55 + %p56 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 55 + store i1 %r56, ptr %p56 + %p57 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 
56 + store i1 %r57, ptr %p57 + %p58 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 57 + store i1 %r58, ptr %p58 + %p59 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 58 + store i1 %r59, ptr %p59 + %p60 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 59 + store i1 %r60, ptr %p60 + %p61 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 60 + store i1 %r61, ptr %p61 + %p62 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 61 + store i1 %r62, ptr %p62 + %p63 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 62 + store i1 %r63, ptr %p63 + %p64 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 63 + store i1 %r64, ptr %p64 + %p65 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 64 + store i1 %r65, ptr %p65 + %p66 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 65 + store i1 %r66, ptr %p66 + %p67 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 66 + store i1 %r67, ptr %p67 + %p68 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 67 + store i1 %r68, ptr %p68 + %p69 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 68 + store i1 %r69, ptr %p69 + %p70 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 69 + store i1 %r70, ptr %p70 + %p71 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 70 + store i1 %r71, ptr %p71 + %p72 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 71 + store i1 %r72, ptr %p72 + %p73 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 72 + store i1 %r73, ptr %p73 + %p74 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 73 + store i1 %r74, ptr %p74 + %p75 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 74 + store i1 %r75, ptr %p75 + %p76 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 75 + store i1 %r76, ptr %p76 + %p77 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 76 + store i1 %r77, ptr %p77 + %p78 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 77 + store i1 %r78, ptr %p78 + %p79 = getelementptr inbounds [90 x i1], ptr @G, i32 0, 
i32 78 + store i1 %r79, ptr %p79 + %p80 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 79 + store i1 %r80, ptr %p80 + %p81 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 80 + store i1 %r81, ptr %p81 + %p82 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 81 + store i1 %r82, ptr %p82 + %p83 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 82 + store i1 %r83, ptr %p83 + %p84 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 83 + store i1 %r84, ptr %p84 + %p85 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 84 + store i1 %r85, ptr %p85 + %p86 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 85 + store i1 %r86, ptr %p86 + %p87 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 86 + store i1 %r87, ptr %p87 + %p88 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 87 + store i1 %r88, ptr %p88 + %p89 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 88 + store i1 %r89, ptr %p89 + %p90 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 89 + store i1 %r90, ptr %p90 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll index c8691c32710ad..7658362773218 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll @@ -31,9 +31,12 @@ %StructEvent = type { target("spirv.Event") } +@G_r = global target("spirv.Event") poison + define spir_kernel void @test_half(ptr addrspace(3) %_arg1, ptr addrspace(1) %_arg2) { entry: %r = tail call spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS3Dv2_DF16_PU3AS1KS_mm9ocl_event(i32 2, ptr addrspace(3) %_arg1, ptr addrspace(1) %_arg2, i64 16, i64 10, target("spirv.Event") zeroinitializer) + store target("spirv.Event") %r, ptr @G_r ret void } @@ -42,7 +45,6 @@ declare dso_local spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU ; CHECK: OpFunction ; CHECK: OpFunctionParameter ; CHECK: %[[#Src:]] = 
OpFunctionParameter -; CHECK: OpVariable %[[#TyStructPtr]] Function ; CHECK: %[[#EventVar:]] = OpVariable %[[#TyEventPtr]] Function ; CHECK: %[[#Dest:]] = OpInBoundsPtrAccessChain ; CHECK: %[[#CopyRes:]] = OpGroupAsyncCopy %[[#TyEvent]] %[[#]] %[[#Dest]] %[[#Src]] %[[#]] %[[#]] %[[#ConstEvent]] diff --git a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll index 46668645f418b..9c8b4070d834d 100644 --- a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll +++ b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll @@ -68,6 +68,27 @@ ; SPV-DAG: %[[#ones_64:]] = OpConstantComposite %[[#vec_64]] %[[#one_64]] %[[#one_64]] ; SPV-DAG: %[[#pointer:]] = OpTypePointer CrossWorkgroup %[[#float]] +@G_s1 = global i8 0 +@G_s2 = global i16 0 +@G_s3 = global i32 0 +@G_s4 = global i64 0 +@G_s5 = global <2 x i8> zeroinitializer +@G_s6 = global <2 x i16> zeroinitializer +@G_s7 = global <2 x i32> zeroinitializer +@G_s8 = global <2 x i64> zeroinitializer +@G_z1 = global i8 0 +@G_z2 = global i16 0 +@G_z3 = global i32 0 +@G_z4 = global i64 0 +@G_z5 = global <2 x i8> zeroinitializer +@G_z6 = global <2 x i16> zeroinitializer +@G_z7 = global <2 x i32> zeroinitializer +@G_z8 = global <2 x i64> zeroinitializer +@G_ufp1 = global float 0.0 +@G_ufp2 = global <2 x float> zeroinitializer +@G_sfp1 = global float 0.0 +@G_sfp2 = global <2 x float> zeroinitializer + ; SPV-DAG: OpFunction ; SPV-DAG: %[[#A:]] = OpFunctionParameter %[[#pointer]] ; SPV-DAG: %[[#B:]] = OpFunctionParameter %[[#]] @@ -87,47 +108,67 @@ entry: ; SPV-DAG: %[[#s1]] = OpSelect %[[#int_8]] %[[#i1s]] %[[#mone_8]] %[[#zero_8]] %s1 = sext i1 %i1s to i8 + store i8 %s1, ptr @G_s1 ; SPV-DAG: %[[#s2]] = OpSelect %[[#int_16]] %[[#i1s]] %[[#mone_16]] %[[#zero_16]] %s2 = sext i1 %i1s to i16 + store i16 %s2, ptr @G_s2 ; SPV-DAG: %[[#s3]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#mone_32]] %[[#zero_32]] %s3 = sext i1 %i1s to i32 + store i32 %s3, ptr @G_s3 ; SPV-DAG: %[[#s4]] = OpSelect %[[#int_64]] %[[#i1s]] %[[#mone_64]] 
%[[#zero_64]] %s4 = sext i1 %i1s to i64 + store i64 %s4, ptr @G_s4 ; SPV-DAG: %[[#s5]] = OpSelect %[[#vec_8]] %[[#i1v]] %[[#mones_8]] %[[#zeros_8]] %s5 = sext <2 x i1> %i1v to <2 x i8> + store <2 x i8> %s5, ptr @G_s5 ; SPV-DAG: %[[#s6]] = OpSelect %[[#vec_16]] %[[#i1v]] %[[#mones_16]] %[[#zeros_16]] %s6 = sext <2 x i1> %i1v to <2 x i16> + store <2 x i16> %s6, ptr @G_s6 ; SPV-DAG: %[[#s7]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#mones_32]] %[[#zeros_32]] %s7 = sext <2 x i1> %i1v to <2 x i32> + store <2 x i32> %s7, ptr @G_s7 ; SPV-DAG: %[[#s8]] = OpSelect %[[#vec_64]] %[[#i1v]] %[[#mones_64]] %[[#zeros_64]] %s8 = sext <2 x i1> %i1v to <2 x i64> + store <2 x i64> %s8, ptr @G_s8 ; SPV-DAG: %[[#z1]] = OpSelect %[[#int_8]] %[[#i1s]] %[[#one_8]] %[[#zero_8]] %z1 = zext i1 %i1s to i8 + store i8 %z1, ptr @G_z1 ; SPV-DAG: %[[#z2]] = OpSelect %[[#int_16]] %[[#i1s]] %[[#one_16]] %[[#zero_16]] %z2 = zext i1 %i1s to i16 + store i16 %z2, ptr @G_z2 ; SPV-DAG: %[[#z3]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] %z3 = zext i1 %i1s to i32 + store i32 %z3, ptr @G_z3 ; SPV-DAG: %[[#z4]] = OpSelect %[[#int_64]] %[[#i1s]] %[[#one_64]] %[[#zero_64]] %z4 = zext i1 %i1s to i64 + store i64 %z4, ptr @G_z4 ; SPV-DAG: %[[#z5]] = OpSelect %[[#vec_8]] %[[#i1v]] %[[#ones_8]] %[[#zeros_8]] %z5 = zext <2 x i1> %i1v to <2 x i8> + store <2 x i8> %z5, ptr @G_z5 ; SPV-DAG: %[[#z6]] = OpSelect %[[#vec_16]] %[[#i1v]] %[[#ones_16]] %[[#zeros_16]] %z6 = zext <2 x i1> %i1v to <2 x i16> + store <2 x i16> %z6, ptr @G_z6 ; SPV-DAG: %[[#z7]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] %z7 = zext <2 x i1> %i1v to <2 x i32> + store <2 x i32> %z7, ptr @G_z7 ; SPV-DAG: %[[#z8]] = OpSelect %[[#vec_64]] %[[#i1v]] %[[#ones_64]] %[[#zeros_64]] %z8 = zext <2 x i1> %i1v to <2 x i64> + store <2 x i64> %z8, ptr @G_z8 ; SPV-DAG: %[[#ufp1_res:]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] ; SPV-DAG: %[[#ufp1]] = OpConvertUToF %[[#float]] %[[#ufp1_res]] %ufp1 = uitofp i1 %i1s 
to float + store float %ufp1, ptr @G_ufp1 ; SPV-DAG: %[[#ufp2_res:]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] ; SPV-DAG: %[[#ufp2]] = OpConvertUToF %[[#vec_float]] %[[#ufp2_res]] %ufp2 = uitofp <2 x i1> %i1v to <2 x float> + store <2 x float> %ufp2, ptr @G_ufp2 ; SPV-DAG: %[[#sfp1_res:]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] ; SPV-DAG: %[[#sfp1]] = OpConvertSToF %[[#float]] %[[#sfp1_res]] %sfp1 = sitofp i1 %i1s to float + store float %sfp1, ptr @G_sfp1 ; SPV-DAG: %[[#sfp2_res:]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] ; SPV-DAG: %[[#sfp2]] = OpConvertSToF %[[#vec_float]] %[[#sfp2_res]] %sfp2 = sitofp <2 x i1> %i1v to <2 x float> + store <2 x float> %sfp2, ptr @G_sfp2 ret void } diff --git a/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll b/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll new file mode 100644 index 0000000000000..511bc46567607 --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll @@ -0,0 +1,66 @@ +; RUN: llc < %s -mtriple=s390x-ibm-zos -emit-gnuas-syntax-on-zos=0 | FileCheck %s +%struct.LargeStruct_t = type { [33 x i32] } + +@GlobLargeS = hidden global %struct.LargeStruct_t zeroinitializer, align 4 +@GlobInt = hidden global i32 0, align 4 + +; === Check that function with small frame does not emit PPA1 Argument Area Length. +define void @fSmallOutArgArea() { +; CHECK-LABEL: L#EPM_fSmallOutArgArea_0 DS 0H +; CHECK: * Bit 1: 1 = Leaf function +; CHECK: * Bit 2: 0 = Does not use alloca +; CHECK: DC XL4'00000008' +; CHECK: fSmallOutArgArea DS 0H +; CHECK: L#PPA1_fSmallOutArgArea_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: DC XL1'00' + ret void +} + +; === Check that function with large frame does emit PPA1 Argument Area Length. 
+define void @fLargeOutArgArea() { +; CHECK-LABEL: L#EPM_fLargeOutArgArea_0 DS 0H +; CHECK: * Bit 1: 0 = Non-leaf function +; CHECK: * Bit 2: 0 = Does not use alloca +; CHECK: DC XL4'00000220' +; CHECK: fLargeOutArgArea DS 0H +; CHECK: L#PPA1_fLargeOutArgArea_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: * Bit 1: 1 = Argument Area Length is in optional area +; CHECK: DC XL1'40' +; CHECK: * Argument Area Length +; CHECK: DC XL4'00000140' + %1 = load [33 x i32], ptr @GlobLargeS, align 4 + call void @fLargeParm([33 x i32] inreg %1) + ret void +} + +; === Check that function with parameter does emit PPA1 Length/4 of parms +define void @fLargeParm([33 x i64] inreg %arr) { +; CHECK-LABEL: L#EPM_fLargeParm_0 DS 0H +; CHECK: * Length/4 of Parms +; CHECK: DC XL2'0042' + %1 = extractvalue [33 x i64] %arr, 1 + call void @foo(i64 %1) + ret void +} + +; === Check that function with alloca call does emit PPA1 Argument Area Length. +define hidden void @fHasAlloca() { +; CHECK-LABEL: L#EPM_fHasAlloca_0 DS 0H +; CHECK: * Bit 2: 1 = Uses alloca +; CHECK: fHasAlloca DS 0H +; CHECK: L#PPA1_fHasAlloca_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: * Bit 1: 1 = Argument Area Length is in optional area +; CHECK: DC XL1'40' +; CHECK: * Argument Area Length +; CHECK: DC XL4'00000040' + %p = alloca ptr, align 4 + %1 = load i32, ptr @GlobInt, align 4 + %2 = alloca i8, i32 %1, align 8 + store ptr %2, ptr %p, align 4 + ret void +} + +declare void @foo(i64) diff --git a/llvm/test/CodeGen/SystemZ/zos-target-flags.ll b/llvm/test/CodeGen/SystemZ/zos-target-flags.ll new file mode 100644 index 0000000000000..968337d87811d --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/zos-target-flags.ll @@ -0,0 +1,17 @@ +; RUN: llc -mtriple=s390x-ibm-zos -stop-after=systemz-isel --simplify-mir < %s | FileCheck %s + + +declare i64 @calc(i64 noundef, ptr noundef) +declare i64 @morework(i64 noundef) + +@i = external local_unnamed_addr global i64, align 8 + +define i64 @work() { +entry: +; CHECK: %{{.*}}:addr64bit = 
ADA_ENTRY_VALUE target-flags(systemz-ada-datasymboladdr) @i, +; CHECK: %{{.*}}:addr64bit = ADA_ENTRY_VALUE target-flags(systemz-ada-directfuncdesc) @calc, +; CHECK: %{{.*}}:addr64bit = ADA_ENTRY_VALUE target-flags(systemz-ada-indirectfuncdesc) @morework, + %0 = load i64, ptr @i, align 8 + %call = tail call i64 @calc(i64 noundef %0, ptr noundef nonnull @morework) #2 + ret i64 %call +} diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir index b8657c27261ae..34adc0122bfc3 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir @@ -1,26 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_2_blocks_2_preds(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p1, i16 zeroext %p2) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p1 to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %conv.i5 = zext i16 %p2 to i32 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %0, <4 x float> %b, i32 %conv.i5) #2 - ret <4 x float> %1 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" 
"min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... --- name: vpt_2_blocks_2_preds alignment: 4 @@ -61,7 +41,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0, $r1 ; CHECK-LABEL: name: vpt_2_blocks_2_preds diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir index 68a38a4c3f19d..bbfa1b2de1837 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir @@ -1,29 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_2_blocks_ctrl_flow(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2 - br label %bb2 - bb2: - %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x 
float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %3 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... --- name: vpt_2_blocks_ctrl_flow alignment: 4 @@ -64,7 +41,7 @@ stack: [] constants: [] body: | ; CHECK-LABEL: name: vpt_2_blocks_ctrl_flow - ; CHECK: bb.0.entry: + ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) ; CHECK: liveins: $q0, $q1, $q2, $r0 ; CHECK: $vpr = VMSR_P0 killed $r0, 14 /* CC::al */, $noreg @@ -74,7 +51,7 @@ body: | ; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, $noreg, killed renamable $q3 ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal renamable $q3, internal renamable $q3, 1, renamable $vpr, $noreg, undef renamable $q1 ; CHECK: } - ; CHECK: bb.1.bb2: + ; CHECK: bb.1: ; CHECK: liveins: $q0, $q1, $q2, $q3, $vpr ; CHECK: BUNDLE implicit-def dead $q3, implicit-def $q0, implicit killed $vpr, implicit killed $q1, implicit killed $q2, implicit killed $q3, implicit killed $q0 { ; CHECK: MVE_VPST 4, implicit $vpr @@ -82,7 +59,7 @@ body: | ; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed 
renamable $q2, 1, killed renamable $vpr, $noreg, killed renamable $q0 ; CHECK: } ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit $q0 - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0 $vpr = VMSR_P0 killed $r0, 14, $noreg @@ -90,7 +67,7 @@ body: | renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, $noreg, killed renamable $q3 renamable $q1 = nnan ninf nsz MVE_VMINNMf32 renamable $q3, renamable $q3, 1, renamable $vpr, $noreg, undef renamable $q1 - bb.1.bb2: + bb.1: liveins: $q0, $q1, $q2, $q3, $vpr renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, $noreg, killed renamable $q3 diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir index caa7b174dee6f..c205ccd87be93 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir @@ -1,27 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_2_blocks_non_consecutive_ins(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2 - %2 = tail call nnan ninf nsz <4 x float> 
@llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %3 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... 
--- name: vpt_2_blocks_non_consecutive_ins alignment: 4 @@ -61,7 +40,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir index 2f074850548ae..63095b69e7267 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir @@ -1,28 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_2_blocks(<4 x float> %inactive1, <4 x float> %inactive2, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2 - %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - %4 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive2, <4 x float> %3, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %4 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, 
i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... --- name: vpt_2_blocks alignment: 4 @@ -63,7 +41,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $q3, $r0 ; CHECK-LABEL: name: vpt_2_blocks diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir index f6b64a046a9fb..a30c2173c20d1 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir @@ -1,27 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_3_blocks_kill_vpr(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, 
i32 %conv.i) #2 - %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %3 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... 
--- name: vpt_3_blocks_kill_vpr alignment: 4 @@ -61,7 +40,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0 ; CHECK-LABEL: name: vpt_3_blocks_kill_vpr diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir index d0865667fd62e..bfffe9934ddad 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir @@ -1,25 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_block_1_ins(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %0 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - - -... 
--- name: vpt_block_1_ins alignment: 4 @@ -59,7 +40,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0 ; CHECK-LABEL: name: vpt_block_1_ins diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir index 54368826d676a..fc779fe3c8c80 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir @@ -1,26 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_block_2_ins(<4 x float> %inactive1, <4 x float> %inactive2, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive2, <4 x float> %0, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %1 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" 
"target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - - -... --- name: vpt_block_2_ins alignment: 4 @@ -61,7 +41,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $q3, $r0 ; CHECK-LABEL: name: vpt_block_2_ins diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir index 435836df1a69d..f07a09832de23 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir @@ -1,27 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_block_4_ins(<4 x float> %inactive1, <4 x float> %inactive2, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2 - %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive2, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %3 - } - - declare <4 x float> 
@llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... --- name: vpt_block_4_ins alignment: 4 @@ -62,7 +41,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $q3, $r0 ; CHECK-LABEL: name: vpt_block_4_ins diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir index dc195dd917e77..bddeaf9221b8f 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir @@ -1,27 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @vpt_block_else(<4 x float> %inactive1, <4 x float> %inactive2, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - %1 = tail call nnan ninf nsz <4 x float> 
@llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2 - %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2 - %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive2, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %3 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - -... 
--- name: vpt_block_else alignment: 4 @@ -62,7 +41,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2 ; CHECK-LABEL: name: vpt_block_else diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir index ba210681178c4..86186e172e2c8 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir @@ -1,25 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s +# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-mve-vpt %s -o - | FileCheck %s ---- | - target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" - target triple = "thumbv8.1m.main-none-none-eabi" - - define hidden arm_aapcs_vfpcc <4 x float> @test_vminnmq_m_f32_v2(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { - entry: - %conv.i = zext i16 %p to i32 - %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2 - ret <4 x float> %0 - } - - declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1 - - attributes #0 = { noinline optnone nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } - - -... 
--- name: test_vminnmq_m_f32_v2 alignment: 4 @@ -59,7 +40,7 @@ fixedStack: [] stack: [] constants: [] body: | - bb.0.entry: + bb.0: liveins: $q0, $q1, $q2, $r0 diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll index 97894db1188e2..f8a04f8514988 100644 --- a/llvm/test/CodeGen/X86/addcarry.ll +++ b/llvm/test/CodeGen/X86/addcarry.ll @@ -1513,3 +1513,41 @@ define i1 @pr84831(i64 %arg) { %trunc = trunc i63 %or to i1 ret i1 %trunc } + +define void @pr169691(ptr %p0, i64 %implicit, i1 zeroext %carry) { +; CHECK-LABEL: pr169691: +; CHECK: # %bb.0: +; CHECK-NEXT: movq (%rdi), %rax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: setb %cl +; CHECK-NEXT: movl %edx, %edx +; CHECK-NEXT: addq %rax, %rdx +; CHECK-NEXT: setb %al +; CHECK-NEXT: orb %cl, %al +; CHECK-NEXT: movq %rdx, (%rdi) +; CHECK-NEXT: addq 8(%rdi), %rsi +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: movq %rax, 8(%rdi) +; CHECK-NEXT: retq + %a0 = load i64, ptr %p0, align 8 + %uaddo0 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a0, i64 %implicit) + %uaddo0.1 = extractvalue { i64, i1 } %uaddo0, 1 + %uaddo0.0 = extractvalue { i64, i1 } %uaddo0, 0 + %zextc = zext i1 %carry to i64 + %uaddo0b = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %uaddo0.0, i64 %zextc) + %uaddo0b.1 = extractvalue { i64, i1 } %uaddo0b, 1 + %uaddo0b.0 = extractvalue { i64, i1 } %uaddo0b, 0 + %carry0 = or i1 %uaddo0.1, %uaddo0b.1 + store i64 %uaddo0b.0, ptr %p0, align 8 + + %p1 = getelementptr inbounds nuw i8, ptr %p0, i64 8 + %a1 = load i64, ptr %p1, align 8 + %uaddo1 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a1, i64 %implicit) + %uaddo1.0 = extractvalue { i64, i1 } %uaddo1, 0 + %zext0 = zext i1 %carry0 to i64 + %uaddo1b = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %uaddo1.0, i64 %zext0) + %uaddo1b.0 = extractvalue { i64, i1 } %uaddo1b, 0 + store i64 %uaddo1b.0, ptr %p1, align 8 + ret void +} diff --git a/llvm/test/CodeGen/X86/haddsubsat.ll 
b/llvm/test/CodeGen/X86/haddsubsat.ll new file mode 100644 index 0000000000000..588f3383ec415 --- /dev/null +++ b/llvm/test/CodeGen/X86/haddsubsat.ll @@ -0,0 +1,101 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s -check-prefix=SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s -check-prefix=AVX2 + +define <8 x i16> @phaddsw_v8i16_intrinsic(<8 x i16> %a, <8 x i16> %b) { +; SSSE3-LABEL: phaddsw_v8i16_intrinsic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phaddsw %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phaddsw_v8i16_intrinsic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq + %res = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a, <8 x i16> %b) + ret <8 x i16> %res +} + +define <8 x i16> @phaddsw_v8i16_generic(<8 x i16> %a, <8 x i16> %b) { +; SSSE3-LABEL: phaddsw_v8i16_generic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phaddsw %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phaddsw_v8i16_generic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq + %even = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> + %odd = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> + %sum = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %even, <8 x i16> %odd) + ret <8 x i16> %sum +} + +define <16 x i16> @phaddsw_v16i16_generic(<16 x i16> %a, <16 x i16> %b) { +; SSSE3-LABEL: phaddsw_v16i16_generic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phaddsw %xmm1, %xmm0 +; SSSE3-NEXT: phaddsw %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phaddsw_v16i16_generic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-NEXT: retq + %even = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> + %odd = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> + %sum = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> 
%even, <16 x i16> %odd) + ret <16 x i16> %sum +} + +define <8 x i16> @phsubsw_v8i16_intrinsic(<8 x i16> %a, <8 x i16> %b) { +; SSSE3-LABEL: phsubsw_v8i16_intrinsic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phsubsw %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phsubsw_v8i16_intrinsic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq + %res = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a, <8 x i16> %b) + ret <8 x i16> %res +} + +define <8 x i16> @phsubsw_v8i16_generic(<8 x i16> %a, <8 x i16> %b) { +; SSSE3-LABEL: phsubsw_v8i16_generic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phsubsw %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phsubsw_v8i16_generic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq + %even = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> + %odd = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> + %diff = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %even, <8 x i16> %odd) + ret <8 x i16> %diff +} + +define <16 x i16> @phsubsw_v16i16_generic(<16 x i16> %a, <16 x i16> %b) { +; SSSE3-LABEL: phsubsw_v16i16_generic: +; SSSE3: # %bb.0: +; SSSE3-NEXT: phsubsw %xmm1, %xmm0 +; SSSE3-NEXT: phsubsw %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: retq +; +; AVX2-LABEL: phsubsw_v16i16_generic: +; AVX2: # %bb.0: +; AVX2-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-NEXT: retq + %even = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> + %odd = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> + %diff = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %even, <16 x i16> %odd) + ret <16 x i16> %diff +} diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll index 8b1e69a97d545..5d216a218cf9b 100644 --- a/llvm/test/CodeGen/X86/kmov.ll +++ b/llvm/test/CodeGen/X86/kmov.ll @@ -477,16 +477,13 @@ define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) { ; X64-AVX512-LABEL: invert_i64_mask_extract_32: ; 
X64-AVX512: # %bb.0: ; X64-AVX512-NEXT: kmovq %rdi, %k0 -; X64-AVX512-NEXT: knotb %k0, %k1 -; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2 -; X64-AVX512-NEXT: knotb %k2, %k2 -; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1 +; X64-AVX512-NEXT: kshiftrd $8, %k0, %k1 +; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k1 ; X64-AVX512-NEXT: kshiftrd $16, %k0, %k2 -; X64-AVX512-NEXT: knotb %k2, %k2 ; X64-AVX512-NEXT: kshiftrd $24, %k0, %k0 -; X64-AVX512-NEXT: knotb %k0, %k0 ; X64-AVX512-NEXT: kunpckbw %k2, %k0, %k0 ; X64-AVX512-NEXT: kunpckwd %k1, %k0, %k0 +; X64-AVX512-NEXT: knotd %k0, %k0 ; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0 ; X64-AVX512-NEXT: retq ; @@ -495,18 +492,16 @@ define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) { ; X64-KNL-NEXT: movl %edi, %eax ; X64-KNL-NEXT: shrl $16, %eax ; X64-KNL-NEXT: kmovw %eax, %k0 -; X64-KNL-NEXT: knotw %k0, %k0 ; X64-KNL-NEXT: movl %edi, %eax ; X64-KNL-NEXT: shrl $24, %eax ; X64-KNL-NEXT: kmovw %eax, %k1 -; X64-KNL-NEXT: knotw %k1, %k1 -; X64-KNL-NEXT: kunpckbw %k0, %k1, %k1 +; X64-KNL-NEXT: kunpckbw %k0, %k1, %k0 +; X64-KNL-NEXT: knotw %k0, %k1 ; X64-KNL-NEXT: kmovw %edi, %k0 -; X64-KNL-NEXT: knotw %k0, %k0 ; X64-KNL-NEXT: shrl $8, %edi ; X64-KNL-NEXT: kmovw %edi, %k2 -; X64-KNL-NEXT: knotw %k2, %k2 -; X64-KNL-NEXT: kunpckbw %k0, %k2, %k2 +; X64-KNL-NEXT: kunpckbw %k0, %k2, %k0 +; X64-KNL-NEXT: knotw %k0, %k2 ; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1 ; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0 ; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1 @@ -586,27 +581,20 @@ define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) { ; X64-AVX512: # %bb.0: ; X64-AVX512-NEXT: kmovq %rdi, %k0 ; X64-AVX512-NEXT: kshiftrq $32, %k0, %k1 -; X64-AVX512-NEXT: knotb %k1, %k1 ; X64-AVX512-NEXT: kshiftrq $40, %k0, %k2 -; X64-AVX512-NEXT: knotb %k2, %k2 ; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1 ; X64-AVX512-NEXT: kshiftrq $48, %k0, %k2 -; X64-AVX512-NEXT: knotb %k2, %k2 ; X64-AVX512-NEXT: kshiftrq $56, %k0, %k3 -; X64-AVX512-NEXT: knotb %k3, %k3 ; 
X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2 ; X64-AVX512-NEXT: kunpckwd %k1, %k2, %k1 -; X64-AVX512-NEXT: knotb %k0, %k2 -; X64-AVX512-NEXT: kshiftrd $8, %k0, %k3 -; X64-AVX512-NEXT: knotb %k3, %k3 -; X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2 +; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2 +; X64-AVX512-NEXT: kunpckbw %k0, %k2, %k2 ; X64-AVX512-NEXT: kshiftrd $16, %k0, %k3 -; X64-AVX512-NEXT: knotb %k3, %k3 ; X64-AVX512-NEXT: kshiftrd $24, %k0, %k0 -; X64-AVX512-NEXT: knotb %k0, %k0 ; X64-AVX512-NEXT: kunpckbw %k3, %k0, %k0 ; X64-AVX512-NEXT: kunpckwd %k2, %k0, %k0 ; X64-AVX512-NEXT: kunpckdq %k0, %k1, %k0 +; X64-AVX512-NEXT: knotq %k0, %k0 ; X64-AVX512-NEXT: vpmovm2b %k0, %zmm0 ; X64-AVX512-NEXT: retq ; @@ -614,38 +602,34 @@ define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) { ; X64-KNL: # %bb.0: ; X64-KNL-NEXT: movq %rdi, %rax ; X64-KNL-NEXT: kmovw %esi, %k0 -; X64-KNL-NEXT: knotw %k0, %k0 ; X64-KNL-NEXT: movl %esi, %ecx ; X64-KNL-NEXT: shrl $8, %ecx ; X64-KNL-NEXT: kmovw %ecx, %k1 -; X64-KNL-NEXT: knotw %k1, %k1 ; X64-KNL-NEXT: kunpckbw %k0, %k1, %k0 +; X64-KNL-NEXT: knotw %k0, %k0 ; X64-KNL-NEXT: movl %esi, %ecx ; X64-KNL-NEXT: shrl $16, %ecx ; X64-KNL-NEXT: kmovw %ecx, %k1 -; X64-KNL-NEXT: knotw %k1, %k1 ; X64-KNL-NEXT: movl %esi, %ecx ; X64-KNL-NEXT: shrl $24, %ecx ; X64-KNL-NEXT: kmovw %ecx, %k2 -; X64-KNL-NEXT: knotw %k2, %k2 ; X64-KNL-NEXT: kunpckbw %k1, %k2, %k1 +; X64-KNL-NEXT: knotw %k1, %k1 ; X64-KNL-NEXT: movq %rsi, %rcx ; X64-KNL-NEXT: shrq $32, %rcx ; X64-KNL-NEXT: kmovw %ecx, %k2 -; X64-KNL-NEXT: knotw %k2, %k2 ; X64-KNL-NEXT: movq %rsi, %rcx ; X64-KNL-NEXT: shrq $40, %rcx ; X64-KNL-NEXT: kmovw %ecx, %k3 -; X64-KNL-NEXT: knotw %k3, %k3 ; X64-KNL-NEXT: kunpckbw %k2, %k3, %k2 +; X64-KNL-NEXT: knotw %k2, %k2 ; X64-KNL-NEXT: movq %rsi, %rcx ; X64-KNL-NEXT: shrq $48, %rcx ; X64-KNL-NEXT: kmovw %ecx, %k3 -; X64-KNL-NEXT: knotw %k3, %k3 ; X64-KNL-NEXT: shrq $56, %rsi ; X64-KNL-NEXT: kmovw %esi, %k4 -; X64-KNL-NEXT: knotw %k4, %k4 ; X64-KNL-NEXT: kunpckbw %k3, 
%k4, %k3 +; X64-KNL-NEXT: knotw %k3, %k3 ; X64-KNL-NEXT: kmovw %k3, 6(%rdi) ; X64-KNL-NEXT: kmovw %k2, 4(%rdi) ; X64-KNL-NEXT: kmovw %k1, 2(%rdi) diff --git a/llvm/test/CodeGen/X86/srem-vector-lkk.ll b/llvm/test/CodeGen/X86/srem-vector-lkk.ll index e936e1ef81b74..0fb6eb3c58893 100644 --- a/llvm/test/CodeGen/X86/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/X86/srem-vector-lkk.ll @@ -1,7 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) { ; SSE-LABEL: fold_srem_vec_1: @@ -55,55 +57,105 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) { ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: fold_srem_vec_1: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: shrl $15, %ecx -; AVX-NEXT: sarl $9, %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: imull $-1003, %edx, %ecx # imm = 0xFC15 -; AVX-NEXT: subl %ecx, %eax -; AVX-NEXT: vmovd %xmm0, %ecx -; 
AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $-21385, %edx, %edx # imm = 0xAC77 -; AVX-NEXT: shrl $16, %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: movzwl %dx, %edx -; AVX-NEXT: movswl %dx, %esi -; AVX-NEXT: shrl $15, %edx -; AVX-NEXT: sarl $6, %esi -; AVX-NEXT: addl %edx, %esi -; AVX-NEXT: imull $95, %esi, %edx -; AVX-NEXT: subl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpextrw $1, %xmm0, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $-16913, %edx, %edx # imm = 0xBDEF -; AVX-NEXT: movl %edx, %esi -; AVX-NEXT: shrl $31, %esi -; AVX-NEXT: sarl $21, %edx -; AVX-NEXT: addl %esi, %edx -; AVX-NEXT: imull $-124, %edx, %edx -; AVX-NEXT: subl %edx, %ecx -; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $2, %xmm0, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $2675, %edx, %edx # imm = 0xA73 -; AVX-NEXT: movl %edx, %esi -; AVX-NEXT: shrl $31, %esi -; AVX-NEXT: sarl $18, %edx -; AVX-NEXT: addl %esi, %edx -; AVX-NEXT: imull $98, %edx, %edx -; AVX-NEXT: subl %edx, %ecx -; AVX-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm0 -; AVX-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1OR2-LABEL: fold_srem_vec_1: +; AVX1OR2: # %bb.0: +; AVX1OR2-NEXT: vpextrw $3, %xmm0, %eax +; AVX1OR2-NEXT: movswl %ax, %ecx +; AVX1OR2-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51 +; AVX1OR2-NEXT: shrl $16, %ecx +; AVX1OR2-NEXT: subl %eax, %ecx +; AVX1OR2-NEXT: movzwl %cx, %ecx +; AVX1OR2-NEXT: movswl %cx, %edx +; AVX1OR2-NEXT: shrl $15, %ecx +; AVX1OR2-NEXT: sarl $9, %edx +; AVX1OR2-NEXT: addl %ecx, %edx +; AVX1OR2-NEXT: imull $-1003, %edx, %ecx # imm = 0xFC15 +; AVX1OR2-NEXT: subl %ecx, %eax +; AVX1OR2-NEXT: vmovd %xmm0, %ecx +; AVX1OR2-NEXT: movswl %cx, %edx +; AVX1OR2-NEXT: imull $-21385, %edx, %edx # imm = 0xAC77 +; AVX1OR2-NEXT: shrl $16, %edx +; AVX1OR2-NEXT: addl %ecx, %edx +; AVX1OR2-NEXT: movzwl %dx, %edx +; AVX1OR2-NEXT: movswl %dx, %esi +; AVX1OR2-NEXT: shrl $15, %edx +; AVX1OR2-NEXT: sarl $6, %esi +; AVX1OR2-NEXT: addl %edx, %esi +; 
AVX1OR2-NEXT: imull $95, %esi, %edx +; AVX1OR2-NEXT: subl %edx, %ecx +; AVX1OR2-NEXT: vmovd %ecx, %xmm1 +; AVX1OR2-NEXT: vpextrw $1, %xmm0, %ecx +; AVX1OR2-NEXT: movswl %cx, %edx +; AVX1OR2-NEXT: imull $-16913, %edx, %edx # imm = 0xBDEF +; AVX1OR2-NEXT: movl %edx, %esi +; AVX1OR2-NEXT: shrl $31, %esi +; AVX1OR2-NEXT: sarl $21, %edx +; AVX1OR2-NEXT: addl %esi, %edx +; AVX1OR2-NEXT: imull $-124, %edx, %edx +; AVX1OR2-NEXT: subl %edx, %ecx +; AVX1OR2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX1OR2-NEXT: vpextrw $2, %xmm0, %ecx +; AVX1OR2-NEXT: movswl %cx, %edx +; AVX1OR2-NEXT: imull $2675, %edx, %edx # imm = 0xA73 +; AVX1OR2-NEXT: movl %edx, %esi +; AVX1OR2-NEXT: shrl $31, %esi +; AVX1OR2-NEXT: sarl $18, %edx +; AVX1OR2-NEXT: addl %esi, %edx +; AVX1OR2-NEXT: imull $98, %edx, %edx +; AVX1OR2-NEXT: subl %edx, %ecx +; AVX1OR2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm0 +; AVX1OR2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX1OR2-NEXT: retq +; +; AVX512-LABEL: fold_srem_vec_1: +; AVX512: # %bb.0: +; AVX512-NEXT: vpextrw $3, %xmm0, %eax +; AVX512-NEXT: movswl %ax, %ecx +; AVX512-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51 +; AVX512-NEXT: shrl $16, %ecx +; AVX512-NEXT: subl %eax, %ecx +; AVX512-NEXT: movzwl %cx, %edx +; AVX512-NEXT: movswl %dx, %ecx +; AVX512-NEXT: shrl $15, %edx +; AVX512-NEXT: sarl $9, %ecx +; AVX512-NEXT: addl %edx, %ecx +; AVX512-NEXT: vmovd %xmm0, %edx +; AVX512-NEXT: movswl %dx, %esi +; AVX512-NEXT: imull $-21385, %esi, %esi # imm = 0xAC77 +; AVX512-NEXT: shrl $16, %esi +; AVX512-NEXT: addl %edx, %esi +; AVX512-NEXT: movzwl %si, %esi +; AVX512-NEXT: movswl %si, %edi +; AVX512-NEXT: shrl $15, %esi +; AVX512-NEXT: sarl $6, %edi +; AVX512-NEXT: addl %esi, %edi +; AVX512-NEXT: imull $95, %edi, %esi +; AVX512-NEXT: subl %esi, %edx +; AVX512-NEXT: vmovd %edx, %xmm1 +; AVX512-NEXT: vpextrw $1, %xmm0, %edx +; AVX512-NEXT: movswl %dx, %esi +; AVX512-NEXT: imull $-16913, %esi, %esi # imm = 0xBDEF +; AVX512-NEXT: movl %esi, %edi +; AVX512-NEXT: shrl $31, %edi +; 
AVX512-NEXT: sarl $21, %esi +; AVX512-NEXT: addl %edi, %esi +; AVX512-NEXT: imull $-1003, %ecx, %ecx # imm = 0xFC15 +; AVX512-NEXT: imull $-124, %esi, %esi +; AVX512-NEXT: subl %esi, %edx +; AVX512-NEXT: vpinsrw $1, %edx, %xmm1, %xmm1 +; AVX512-NEXT: vpextrw $2, %xmm0, %edx +; AVX512-NEXT: subl %ecx, %eax +; AVX512-NEXT: movswl %dx, %ecx +; AVX512-NEXT: imull $2675, %ecx, %ecx # imm = 0xA73 +; AVX512-NEXT: movl %ecx, %esi +; AVX512-NEXT: shrl $31, %esi +; AVX512-NEXT: sarl $18, %ecx +; AVX512-NEXT: addl %esi, %ecx +; AVX512-NEXT: imull $98, %ecx, %ecx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: vpinsrw $2, %edx, %xmm1, %xmm0 +; AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX512-NEXT: retq %1 = srem <4 x i16> %x, ret <4 x i16> %1 } @@ -139,20 +191,35 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) { ; Don't fold if we can combine srem with sdiv. define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) { -; SSE-LABEL: combine_srem_sdiv: -; SSE: # %bb.0: -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] -; SSE-NEXT: pmulhw %xmm0, %xmm1 -; SSE-NEXT: paddw %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: psrlw $15, %xmm2 -; SSE-NEXT: psraw $6, %xmm1 -; SSE-NEXT: paddw %xmm2, %xmm1 -; SSE-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] -; SSE-NEXT: pmullw %xmm1, %xmm2 -; SSE-NEXT: psubw %xmm2, %xmm0 -; SSE-NEXT: paddw %xmm1, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: combine_srem_sdiv: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] +; SSE2-NEXT: pmulhw %xmm0, %xmm1 +; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psrlw $15, %xmm2 +; SSE2-NEXT: psraw $6, %xmm1 +; SSE2-NEXT: paddw %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] +; SSE2-NEXT: pmullw %xmm1, %xmm2 +; SSE2-NEXT: psubw %xmm2, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE4-LABEL: combine_srem_sdiv: +; SSE4: # %bb.0: +; 
SSE4-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] +; SSE4-NEXT: pmulhw %xmm0, %xmm1 +; SSE4-NEXT: paddw %xmm0, %xmm1 +; SSE4-NEXT: movdqa %xmm1, %xmm2 +; SSE4-NEXT: psrlw $15, %xmm2 +; SSE4-NEXT: psraw $6, %xmm1 +; SSE4-NEXT: paddw %xmm2, %xmm1 +; SSE4-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] +; SSE4-NEXT: pmullw %xmm1, %xmm2 +; SSE4-NEXT: psubw %xmm2, %xmm0 +; SSE4-NEXT: paddw %xmm1, %xmm0 +; SSE4-NEXT: retq ; ; AVX-LABEL: combine_srem_sdiv: ; AVX: # %bb.0: @@ -421,48 +488,93 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) { ; Don't fold i64 srem. define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) { -; SSE-LABEL: dont_fold_srem_i64: -; SSE: # %bb.0: -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: movq %xmm1, %rcx -; SSE-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165 -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: imulq %rdx -; SSE-NEXT: addq %rcx, %rdx -; SSE-NEXT: movq %rdx, %rax -; SSE-NEXT: shrq $63, %rax -; SSE-NEXT: sarq $4, %rdx -; SSE-NEXT: addq %rax, %rdx -; SSE-NEXT: leaq (%rdx,%rdx,2), %rax -; SSE-NEXT: shlq $3, %rax -; SSE-NEXT: subq %rax, %rdx -; SSE-NEXT: addq %rcx, %rdx -; SSE-NEXT: movq %rdx, %xmm1 -; SSE-NEXT: pextrq $1, %xmm2, %rcx -; SSE-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7 -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: imulq %rdx -; SSE-NEXT: movq %rdx, %rax -; SSE-NEXT: shrq $63, %rax -; SSE-NEXT: sarq $11, %rdx -; SSE-NEXT: addq %rax, %rdx -; SSE-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F -; SSE-NEXT: subq %rax, %rcx -; SSE-NEXT: movq %rcx, %xmm2 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; SSE-NEXT: pextrq $1, %xmm0, %rcx -; SSE-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: imulq %rdx -; SSE-NEXT: movq %rdx, %rax -; SSE-NEXT: shrq $63, %rax -; SSE-NEXT: sarq $8, %rdx -; SSE-NEXT: addq %rax, %rdx -; SSE-NEXT: imulq $654, %rdx, %rax # imm = 0x28E -; SSE-NEXT: subq 
%rax, %rcx -; SSE-NEXT: movq %rcx, %xmm0 -; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] -; SSE-NEXT: retq +; SSE2-LABEL: dont_fold_srem_i64: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165 +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: imulq %rdx +; SSE2-NEXT: addq %rcx, %rdx +; SSE2-NEXT: movq %rdx, %rax +; SSE2-NEXT: shrq $63, %rax +; SSE2-NEXT: sarq $4, %rdx +; SSE2-NEXT: addq %rax, %rdx +; SSE2-NEXT: leaq (%rdx,%rdx,2), %rax +; SSE2-NEXT: shlq $3, %rax +; SSE2-NEXT: subq %rax, %rdx +; SSE2-NEXT: addq %rcx, %rdx +; SSE2-NEXT: movq %rdx, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7 +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: imulq %rdx +; SSE2-NEXT: movq %rdx, %rax +; SSE2-NEXT: shrq $63, %rax +; SSE2-NEXT: sarq $11, %rdx +; SSE2-NEXT: addq %rax, %rdx +; SSE2-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: imulq %rdx +; SSE2-NEXT: movq %rdx, %rax +; SSE2-NEXT: shrq $63, %rax +; SSE2-NEXT: sarq $8, %rdx +; SSE2-NEXT: addq %rax, %rdx +; SSE2-NEXT: imulq $654, %rdx, %rax # imm = 0x28E +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] +; SSE2-NEXT: retq +; +; SSE4-LABEL: dont_fold_srem_i64: +; SSE4: # %bb.0: +; SSE4-NEXT: movdqa %xmm1, %xmm2 +; SSE4-NEXT: movq %xmm1, %rcx +; SSE4-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165 +; SSE4-NEXT: movq %rcx, 
%rax +; SSE4-NEXT: imulq %rdx +; SSE4-NEXT: addq %rcx, %rdx +; SSE4-NEXT: movq %rdx, %rax +; SSE4-NEXT: shrq $63, %rax +; SSE4-NEXT: sarq $4, %rdx +; SSE4-NEXT: addq %rax, %rdx +; SSE4-NEXT: leaq (%rdx,%rdx,2), %rax +; SSE4-NEXT: shlq $3, %rax +; SSE4-NEXT: subq %rax, %rdx +; SSE4-NEXT: addq %rcx, %rdx +; SSE4-NEXT: movq %rdx, %xmm1 +; SSE4-NEXT: pextrq $1, %xmm2, %rcx +; SSE4-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7 +; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: imulq %rdx +; SSE4-NEXT: movq %rdx, %rax +; SSE4-NEXT: shrq $63, %rax +; SSE4-NEXT: sarq $11, %rdx +; SSE4-NEXT: addq %rax, %rdx +; SSE4-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F +; SSE4-NEXT: subq %rax, %rcx +; SSE4-NEXT: movq %rcx, %xmm2 +; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE4-NEXT: pextrq $1, %xmm0, %rcx +; SSE4-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 +; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: imulq %rdx +; SSE4-NEXT: movq %rdx, %rax +; SSE4-NEXT: shrq $63, %rax +; SSE4-NEXT: sarq $8, %rdx +; SSE4-NEXT: addq %rax, %rdx +; SSE4-NEXT: imulq $654, %rdx, %rax # imm = 0x28E +; SSE4-NEXT: subq %rax, %rcx +; SSE4-NEXT: movq %rcx, %xmm0 +; SSE4-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] +; SSE4-NEXT: retq ; ; AVX1-LABEL: dont_fold_srem_i64: ; AVX1: # %bb.0: @@ -551,6 +663,50 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) { ; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: retq +; +; AVX512-LABEL: dont_fold_srem_i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vmovq %xmm1, %rcx +; AVX512-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165 +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: imulq %rdx +; AVX512-NEXT: addq %rcx, %rdx +; AVX512-NEXT: movq %rdx, %rax +; AVX512-NEXT: shrq $63, %rax +; AVX512-NEXT: sarq $4, 
%rdx +; AVX512-NEXT: addq %rax, %rdx +; AVX512-NEXT: leaq (%rdx,%rdx,2), %rax +; AVX512-NEXT: shlq $3, %rax +; AVX512-NEXT: subq %rax, %rdx +; AVX512-NEXT: addq %rcx, %rdx +; AVX512-NEXT: vpextrq $1, %xmm1, %rcx +; AVX512-NEXT: vmovq %rdx, %xmm1 +; AVX512-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7 +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: imulq %rdx +; AVX512-NEXT: movq %rdx, %rax +; AVX512-NEXT: shrq $63, %rax +; AVX512-NEXT: sarq $11, %rdx +; AVX512-NEXT: addq %rax, %rdx +; AVX512-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm0, %rcx +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm2[0] +; AVX512-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: imulq %rdx +; AVX512-NEXT: movq %rdx, %rax +; AVX512-NEXT: shrq $63, %rax +; AVX512-NEXT: sarq $8, %rdx +; AVX512-NEXT: addq %rax, %rdx +; AVX512-NEXT: imulq $654, %rdx, %rax # imm = 0x28E +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm1 +; AVX512-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7] +; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NEXT: retq %1 = srem <4 x i64> %x, ret <4 x i64> %1 } diff --git a/llvm/test/CodeGen/X86/urem-vector-lkk.ll b/llvm/test/CodeGen/X86/urem-vector-lkk.ll index 94c7892795c2b..3d0d73be9a589 100644 --- a/llvm/test/CodeGen/X86/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/X86/urem-vector-lkk.ll @@ -1,7 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s 
--check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) { ; SSE-LABEL: fold_urem_vec_1: @@ -110,16 +112,27 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) { ; Don't fold if we can combine urem with udiv. define <4 x i16> @combine_urem_udiv(<4 x i16> %x) { -; SSE-LABEL: combine_urem_udiv: -; SSE: # %bb.0: -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] -; SSE-NEXT: pmulhuw %xmm0, %xmm1 -; SSE-NEXT: psrlw $6, %xmm1 -; SSE-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] -; SSE-NEXT: pmullw %xmm1, %xmm2 -; SSE-NEXT: psubw %xmm2, %xmm0 -; SSE-NEXT: paddw %xmm1, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: combine_urem_udiv: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] +; SSE2-NEXT: pmulhuw %xmm0, %xmm1 +; SSE2-NEXT: psrlw $6, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] +; SSE2-NEXT: pmullw %xmm1, %xmm2 +; SSE2-NEXT: psubw %xmm2, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE4-LABEL: combine_urem_udiv: +; SSE4: # %bb.0: +; SSE4-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151] +; SSE4-NEXT: pmulhuw %xmm0, %xmm1 +; SSE4-NEXT: psrlw $6, %xmm1 +; SSE4-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95] +; SSE4-NEXT: pmullw %xmm1, %xmm2 +; SSE4-NEXT: psubw %xmm2, %xmm0 +; SSE4-NEXT: paddw %xmm1, %xmm0 +; SSE4-NEXT: 
retq ; ; AVX-LABEL: combine_urem_udiv: ; AVX: # %bb.0: @@ -137,24 +150,43 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) { ; Don't fold for divisors that are a power of two. define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) { -; SSE-LABEL: dont_fold_urem_power_of_two: -; SSE: # %bb.0: -; SSE-NEXT: pmovsxbd {{.*#+}} xmm1 = [63,63,63,63] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: pextrw $1, %xmm0, %eax -; SSE-NEXT: andl $31, %eax -; SSE-NEXT: pinsrw $1, %eax, %xmm1 -; SSE-NEXT: pextrw $2, %xmm0, %eax -; SSE-NEXT: andl $7, %eax -; SSE-NEXT: pinsrw $2, %eax, %xmm1 -; SSE-NEXT: pextrw $3, %xmm0, %eax -; SSE-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77 -; SSE-NEXT: shrl $22, %ecx -; SSE-NEXT: imull $95, %ecx, %ecx -; SSE-NEXT: subl %ecx, %eax -; SSE-NEXT: pinsrw $3, %eax, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: dont_fold_urem_power_of_two: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [63,63,63,63] +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pextrw $1, %xmm0, %eax +; SSE2-NEXT: andl $31, %eax +; SSE2-NEXT: pinsrw $1, %eax, %xmm1 +; SSE2-NEXT: pextrw $2, %xmm0, %eax +; SSE2-NEXT: andl $7, %eax +; SSE2-NEXT: pinsrw $2, %eax, %xmm1 +; SSE2-NEXT: pextrw $3, %xmm0, %eax +; SSE2-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77 +; SSE2-NEXT: shrl $22, %ecx +; SSE2-NEXT: imull $95, %ecx, %ecx +; SSE2-NEXT: subl %ecx, %eax +; SSE2-NEXT: pinsrw $3, %eax, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE4-LABEL: dont_fold_urem_power_of_two: +; SSE4: # %bb.0: +; SSE4-NEXT: pmovsxbd {{.*#+}} xmm1 = [63,63,63,63] +; SSE4-NEXT: pand %xmm0, %xmm1 +; SSE4-NEXT: pextrw $1, %xmm0, %eax +; SSE4-NEXT: andl $31, %eax +; SSE4-NEXT: pinsrw $1, %eax, %xmm1 +; SSE4-NEXT: pextrw $2, %xmm0, %eax +; SSE4-NEXT: andl $7, %eax +; SSE4-NEXT: pinsrw $2, %eax, %xmm1 +; SSE4-NEXT: pextrw $3, %xmm0, %eax +; SSE4-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77 +; SSE4-NEXT: shrl $22, %ecx +; SSE4-NEXT: imull $95, %ecx, %ecx +; 
SSE4-NEXT: subl %ecx, %eax +; SSE4-NEXT: pinsrw $3, %eax, %xmm1 +; SSE4-NEXT: movdqa %xmm1, %xmm0 +; SSE4-NEXT: retq ; ; AVX1-LABEL: dont_fold_urem_power_of_two: ; AVX1: # %bb.0: @@ -190,6 +222,23 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) { ; AVX2-NEXT: subl %ecx, %eax ; AVX2-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0 ; AVX2-NEXT: retq +; +; AVX512-LABEL: dont_fold_urem_power_of_two: +; AVX512: # %bb.0: +; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1 +; AVX512-NEXT: vpextrw $1, %xmm0, %eax +; AVX512-NEXT: andl $31, %eax +; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 +; AVX512-NEXT: vpextrw $2, %xmm0, %eax +; AVX512-NEXT: andl $7, %eax +; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 +; AVX512-NEXT: vpextrw $3, %xmm0, %eax +; AVX512-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77 +; AVX512-NEXT: shrl $22, %ecx +; AVX512-NEXT: imull $95, %ecx, %ecx +; AVX512-NEXT: subl %ecx, %eax +; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0 +; AVX512-NEXT: retq %1 = urem <4 x i16> %x, ret <4 x i16> %1 } @@ -228,36 +277,67 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) { ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: dont_fold_urem_one: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: imull $25645, %eax, %ecx # imm = 0x642D -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: movl %eax, %edx -; AVX-NEXT: subl %ecx, %edx -; AVX-NEXT: movzwl %dx, %edx -; AVX-NEXT: shrl %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: shrl $4, %edx -; AVX-NEXT: leal (%rdx,%rdx,2), %ecx -; AVX-NEXT: shll $3, %ecx -; AVX-NEXT: subl %ecx, %edx -; AVX-NEXT: addl %eax, %edx -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: imull $51307, %eax, %ecx # imm = 0xC86B -; AVX-NEXT: shrl $25, %ecx -; AVX-NEXT: imull $654, %ecx, %ecx # imm = 0x28E -; AVX-NEXT: subl %ecx, %eax -; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: 
imull $12375, %eax, %ecx # imm = 0x3057 -; AVX-NEXT: shrl $26, %ecx -; AVX-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F -; AVX-NEXT: subl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1OR2-LABEL: dont_fold_urem_one: +; AVX1OR2: # %bb.0: +; AVX1OR2-NEXT: vpextrw $2, %xmm0, %eax +; AVX1OR2-NEXT: imull $25645, %eax, %ecx # imm = 0x642D +; AVX1OR2-NEXT: shrl $16, %ecx +; AVX1OR2-NEXT: movl %eax, %edx +; AVX1OR2-NEXT: subl %ecx, %edx +; AVX1OR2-NEXT: movzwl %dx, %edx +; AVX1OR2-NEXT: shrl %edx +; AVX1OR2-NEXT: addl %ecx, %edx +; AVX1OR2-NEXT: shrl $4, %edx +; AVX1OR2-NEXT: leal (%rdx,%rdx,2), %ecx +; AVX1OR2-NEXT: shll $3, %ecx +; AVX1OR2-NEXT: subl %ecx, %edx +; AVX1OR2-NEXT: addl %eax, %edx +; AVX1OR2-NEXT: vpextrw $1, %xmm0, %eax +; AVX1OR2-NEXT: imull $51307, %eax, %ecx # imm = 0xC86B +; AVX1OR2-NEXT: shrl $25, %ecx +; AVX1OR2-NEXT: imull $654, %ecx, %ecx # imm = 0x28E +; AVX1OR2-NEXT: subl %ecx, %eax +; AVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1OR2-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 +; AVX1OR2-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1 +; AVX1OR2-NEXT: vpextrw $3, %xmm0, %eax +; AVX1OR2-NEXT: imull $12375, %eax, %ecx # imm = 0x3057 +; AVX1OR2-NEXT: shrl $26, %ecx +; AVX1OR2-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F +; AVX1OR2-NEXT: subl %ecx, %eax +; AVX1OR2-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0 +; AVX1OR2-NEXT: retq +; +; AVX512-LABEL: dont_fold_urem_one: +; AVX512: # %bb.0: +; AVX512-NEXT: vpextrw $2, %xmm0, %eax +; AVX512-NEXT: imull $25645, %eax, %ecx # imm = 0x642D +; AVX512-NEXT: shrl $16, %ecx +; AVX512-NEXT: movl %eax, %edx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: movzwl %dx, %edx +; AVX512-NEXT: shrl %edx +; AVX512-NEXT: addl %ecx, %edx +; AVX512-NEXT: shrl $4, %edx +; AVX512-NEXT: leal (%rdx,%rdx,2), %ecx +; AVX512-NEXT: shll $3, %ecx +; AVX512-NEXT: subl %ecx, %edx +; AVX512-NEXT: vpextrw $1, %xmm0, %ecx +; AVX512-NEXT: addl %eax, %edx +; AVX512-NEXT: imull $51307, %ecx, %eax # imm = 0xC86B +; AVX512-NEXT: shrl 
$25, %eax +; AVX512-NEXT: imull $654, %eax, %eax # imm = 0x28E +; AVX512-NEXT: subl %eax, %ecx +; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1 +; AVX512-NEXT: vpextrw $3, %xmm0, %eax +; AVX512-NEXT: imull $12375, %eax, %ecx # imm = 0x3057 +; AVX512-NEXT: shrl $26, %ecx +; AVX512-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F +; AVX512-NEXT: subl %ecx, %eax +; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0 +; AVX512-NEXT: retq %1 = urem <4 x i16> %x, ret <4 x i16> %1 } @@ -267,49 +347,96 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) { ; CHECK-LABEL: dont_fold_urem_i16_smax: ; CHECK: # %bb.0: ; CHECK-NEXT: retq +; SSE-LABEL: dont_fold_urem_i16_smax: +; SSE: # %bb.0: +; SSE-NEXT: retq +; +; AVX-LABEL: dont_fold_urem_i16_smax: +; AVX: # %bb.0: +; AVX-NEXT: retq %1 = urem <4 x i16> %x, ret <4 x i16> %1 } ; Don't fold i64 urem. define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) { -; SSE-LABEL: dont_fold_urem_i64: -; SSE: # %bb.0: -; SSE-NEXT: movq %xmm1, %rcx -; SSE-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9 -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: mulq %rdx -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: subq %rdx, %rax -; SSE-NEXT: shrq %rax -; SSE-NEXT: addq %rdx, %rax -; SSE-NEXT: shrq $4, %rax -; SSE-NEXT: leaq (%rax,%rax,2), %rdx -; SSE-NEXT: shlq $3, %rdx -; SSE-NEXT: subq %rdx, %rax -; SSE-NEXT: addq %rcx, %rax -; SSE-NEXT: movq %rax, %xmm2 -; SSE-NEXT: pextrq $1, %xmm1, %rcx -; SSE-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: mulq %rdx -; SSE-NEXT: shrq $12, %rdx -; SSE-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F -; SSE-NEXT: subq %rax, %rcx -; SSE-NEXT: movq %rcx, %xmm1 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE-NEXT: pextrq $1, %xmm0, %rcx -; SSE-NEXT: movq %rcx, %rax -; SSE-NEXT: shrq %rax -; SSE-NEXT: movabsq $7220743857598845893, %rdx # imm = 
0x64353C48064353C5 -; SSE-NEXT: mulq %rdx -; SSE-NEXT: shrq $7, %rdx -; SSE-NEXT: imulq $654, %rdx, %rax # imm = 0x28E -; SSE-NEXT: subq %rax, %rcx -; SSE-NEXT: movq %rcx, %xmm0 -; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] -; SSE-NEXT: movdqa %xmm2, %xmm1 -; SSE-NEXT: retq +; SSE2-LABEL: dont_fold_urem_i64: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9 +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: mulq %rdx +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: subq %rdx, %rax +; SSE2-NEXT: shrq %rax +; SSE2-NEXT: addq %rdx, %rax +; SSE2-NEXT: shrq $4, %rax +; SSE2-NEXT: leaq (%rax,%rax,2), %rdx +; SSE2-NEXT: shlq $3, %rdx +; SSE2-NEXT: subq %rdx, %rax +; SSE2-NEXT: addq %rcx, %rax +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: mulq %rdx +; SSE2-NEXT: shrq $12, %rdx +; SSE2-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: shrq %rax +; SSE2-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 +; SSE2-NEXT: mulq %rdx +; SSE2-NEXT: shrq $7, %rdx +; SSE2-NEXT: imulq $654, %rdx, %rax # imm = 0x28E +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] +; SSE2-NEXT: retq +; +; SSE4-LABEL: dont_fold_urem_i64: +; SSE4: # %bb.0: +; SSE4-NEXT: movq %xmm1, %rcx +; SSE4-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9 +; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: mulq %rdx 
+; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: subq %rdx, %rax +; SSE4-NEXT: shrq %rax +; SSE4-NEXT: addq %rdx, %rax +; SSE4-NEXT: shrq $4, %rax +; SSE4-NEXT: leaq (%rax,%rax,2), %rdx +; SSE4-NEXT: shlq $3, %rdx +; SSE4-NEXT: subq %rdx, %rax +; SSE4-NEXT: addq %rcx, %rax +; SSE4-NEXT: movq %rax, %xmm2 +; SSE4-NEXT: pextrq $1, %xmm1, %rcx +; SSE4-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D +; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: mulq %rdx +; SSE4-NEXT: shrq $12, %rdx +; SSE4-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F +; SSE4-NEXT: subq %rax, %rcx +; SSE4-NEXT: movq %rcx, %xmm1 +; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE4-NEXT: pextrq $1, %xmm0, %rcx +; SSE4-NEXT: movq %rcx, %rax +; SSE4-NEXT: shrq %rax +; SSE4-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5 +; SSE4-NEXT: mulq %rdx +; SSE4-NEXT: shrq $7, %rdx +; SSE4-NEXT: imulq $654, %rdx, %rax # imm = 0x28E +; SSE4-NEXT: subq %rax, %rcx +; SSE4-NEXT: movq %rcx, %xmm0 +; SSE4-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] +; SSE4-NEXT: movdqa %xmm2, %xmm1 +; SSE4-NEXT: retq ; ; AVX1-LABEL: dont_fold_urem_i64: ; AVX1: # %bb.0: @@ -388,6 +515,43 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) { ; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: retq +; +; AVX512-LABEL: dont_fold_urem_i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vmovq %xmm1, %rdx +; AVX512-NEXT: movabsq $7218291159277650633, %rax # imm = 0x642C8590B21642C9 +; AVX512-NEXT: mulxq %rax, %rax, %rax +; AVX512-NEXT: movq %rdx, %rcx +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: shrq %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: shrq $4, %rcx +; AVX512-NEXT: leaq (%rcx,%rcx,2), %rax +; AVX512-NEXT: shlq $3, %rax +; AVX512-NEXT: subq %rax, %rcx +; AVX512-NEXT: addq %rdx, %rcx +; 
AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: movabsq $-4513890722074972339, %rax # imm = 0xC15B704DCBCA2F4D +; AVX512-NEXT: mulxq %rax, %rax, %rax +; AVX512-NEXT: vmovq %rcx, %xmm1 +; AVX512-NEXT: shrq $12, %rax +; AVX512-NEXT: imulq $5423, %rax, %rax # imm = 0x152F +; AVX512-NEXT: subq %rax, %rdx +; AVX512-NEXT: vmovq %rdx, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX512-NEXT: vpextrq $1, %xmm0, %rax +; AVX512-NEXT: movq %rax, %rdx +; AVX512-NEXT: shrq %rdx +; AVX512-NEXT: movabsq $7220743857598845893, %rcx # imm = 0x64353C48064353C5 +; AVX512-NEXT: mulxq %rcx, %rcx, %rcx +; AVX512-NEXT: shrq $7, %rcx +; AVX512-NEXT: imulq $654, %rcx, %rcx # imm = 0x28E +; AVX512-NEXT: subq %rcx, %rax +; AVX512-NEXT: vmovq %rax, %xmm0 +; AVX512-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] +; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX512-NEXT: retq %1 = urem <4 x i64> %x, ret <4 x i64> %1 } diff --git a/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll b/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll new file mode 100644 index 0000000000000..3916a205dd19c --- /dev/null +++ b/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll @@ -0,0 +1,204 @@ +; In the RISC-V architecture, the .text section is subject to +; relaxation, meaning the start address of each function can change +; during the linking process. Therefore, the .debug_rnglists.dwo +; section must obtain function's start addresses from the .debug_addr +; section. + +; Generally, a function's body can be relaxed (for example, the +; square() and main() functions in this test, which contain call +; instructions). For such code ranges, the linker must place the +; start and end addresses into the .debug_addr section and use +; the DW_RLE_startx_endx entry form in the .debug_rnglists.dwo +; section within the .dwo file. + +; However, some functions may not contain any relaxable instructions +; (for example, the boo() function in this test). 
In these cases, +; it is possible to use the more space-efficient DW_RLE_startx_length +; range entry form. + +; RUN: rm -rf %t && split-file %s %t && cd %t + +; RUN: llc -dwarf-version=5 -split-dwarf-file=foo.dwo -O0 -mtriple=riscv64-unknown-linux-gnu -filetype=obj relax_dwo_ranges.ll -o %t.o +; RUN: llvm-dwarfdump -v %t.o | FileCheck --check-prefix=DWARF5 %s +; RUN: llvm-dwarfdump --debug-info %t.o > /dev/null 2>&1 | count 0 +; RUN: llvm-objdump -h %t.o | FileCheck --check-prefix=HDR %s + +; RUN: llc -dwarf-version=4 -split-dwarf-file=foo.dwo -O0 -mtriple=riscv64-unknown-linux-gnu -filetype=obj relax_dwo_ranges.ll -o %t.o +; RUN: llvm-dwarfdump -v %t.o | FileCheck --check-prefix=DWARF4 %s +; RUN: llvm-dwarfdump --debug-info %t.o > /dev/null 2>&1 | count 0 +; RUN: llvm-objdump -h %t.o | FileCheck --check-prefix=HDR %s + +; Make sure we don't produce any relocations in any .dwo section +; HDR-NOT: .rela.{{.*}}.dwo + +; Ensure that 'square()' function uses indexed start and end addresses +; DWARF5: .debug_info.dwo contents: +; DWARF5: DW_TAG_subprogram +; DWARF5-NEXT: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x0000000000000000 ".text") +; DWARF5-NEXT: DW_AT_high_pc [DW_FORM_addrx] (indexed (00000001) address = 0x000000000000002c ".text") +; DWARF5: DW_AT_name {{.*}} "square") +; DWARF5: DW_TAG_formal_parameter + +; HDR-NOT: .rela.{{.*}}.dwo + +; Ensure there is no unnecessary addresses in .o file +; DWARF5: .debug_addr contents: +; DWARF5: Addrs: [ +; DWARF5-NEXT: 0x0000000000000000 +; DWARF5-NEXT: 0x000000000000002c +; DWARF5-NEXT: 0x000000000000002c +; DWARF5-NEXT: 0x000000000000003e +; DWARF5-NEXT: 0x000000000000006e +; DWARF5-NEXT: ] + +; HDR-NOT: .rela.{{.*}}.dwo + +; Ensure that 'boo()' and 'main()' use DW_RLE_startx_length and DW_RLE_startx_endx +; entries respectively +; DWARF5: .debug_rnglists.dwo contents: +; DWARF5: ranges: +; DWARF5-NEXT: 0x00000014: [DW_RLE_startx_length]: 0x0000000000000002, 0x0000000000000012 => [0x000000000000002c, 
0x000000000000003e) +; DWARF5-NEXT: 0x00000017: [DW_RLE_end_of_list ] +; DWARF5-NEXT: 0x00000018: [DW_RLE_startx_endx ]: 0x0000000000000003, 0x0000000000000004 => [0x000000000000003e, 0x000000000000006e) +; DWARF5-NEXT: 0x0000001b: [DW_RLE_end_of_list ] +; DWARF5-EMPTY: + +; HDR-NOT: .rela.{{.*}}.dwo + +; DWARF4: .debug_info.dwo contents: +; DWARF4: DW_TAG_subprogram +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000000) address = 0x0000000000000000 ".text") +; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_GNU_addr_index] (indexed (00000001) address = 0x000000000000002c ".text") +; DWARF4: DW_AT_name {{.*}} "square") + +; DWARF4: DW_TAG_subprogram +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000002) address = 0x000000000000002c ".text") +; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000012) +; DWARF4: DW_AT_name {{.*}} "boo") + +; DWARF4: DW_TAG_subprogram +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000003) address = 0x000000000000003e ".text") +; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_GNU_addr_index] (indexed (00000004) address = 0x000000000000006e ".text") +; DWARF4: DW_AT_name {{.*}} "main") + +; HDR-NOT: .rela.{{.*}}.dwo + +; Ensure there is no unnecessary addresses in .o file +; DWARF4: .debug_addr contents: +; DWARF4: Addrs: [ +; DWARF4-NEXT: 0x0000000000000000 +; DWARF4-NEXT: 0x000000000000002c +; DWARF4-NEXT: 0x000000000000002c +; DWARF4-NEXT: 0x000000000000003e +; DWARF4-NEXT: 0x000000000000006e +; DWARF4-NEXT: ] + +; HDR-NOT: .rela.{{.*}}.dwo + +#--- relax_dwo_ranges.cpp +__attribute__((noinline)) int boo(); + +int square(int num) { + int num1 = boo(); + return num1 * num; +} + +__attribute__((noinline)) int boo() { + return 8; +} + +int main() { + int a = 10; + int squared = square(a); + return squared; +} + +#--- gen +clang -g -S -emit-llvm -gsplit-dwarf --target=riscv64 -march=rv64gc -O0 relax_dwo_ranges.cpp -o - + +#--- relax_dwo_ranges.ll +; ModuleID = 'relax_dwo_ranges.cpp' +source_filename = 
"relax_dwo_ranges.cpp" +target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128" +target triple = "riscv64-unknown-unknown" + +; Function Attrs: mustprogress noinline optnone +define dso_local noundef signext i32 @_Z6squarei(i32 noundef signext %0) #0 !dbg !10 { + %2 = alloca i32, align 4 + %3 = alloca i32, align 4 + store i32 %0, ptr %2, align 4 + #dbg_declare(ptr %2, !15, !DIExpression(), !16) + #dbg_declare(ptr %3, !17, !DIExpression(), !18) + %4 = call noundef signext i32 @_Z3boov(), !dbg !19 + store i32 %4, ptr %3, align 4, !dbg !18 + %5 = load i32, ptr %3, align 4, !dbg !20 + %6 = load i32, ptr %2, align 4, !dbg !21 + %7 = mul nsw i32 %5, %6, !dbg !22 + ret i32 %7, !dbg !23 +} + +; Function Attrs: mustprogress noinline nounwind optnone +define dso_local noundef signext i32 @_Z3boov() #1 !dbg !24 { + ret i32 8, !dbg !27 +} + +; Function Attrs: mustprogress noinline norecurse optnone +define dso_local noundef signext i32 @main() #2 !dbg !28 { + %1 = alloca i32, align 4 + %2 = alloca i32, align 4 + %3 = alloca i32, align 4 + store i32 0, ptr %1, align 4 + #dbg_declare(ptr %2, !29, !DIExpression(), !30) + store i32 10, ptr %2, align 4, !dbg !30 + #dbg_declare(ptr %3, !31, !DIExpression(), !32) + %4 = load i32, ptr %2, align 4, !dbg !33 + %5 = call noundef signext i32 @_Z6squarei(i32 noundef signext %4), !dbg !34 + store i32 %5, ptr %3, align 4, !dbg !32 + %6 = load i32, ptr %3, align 4, !dbg !35 + ret i32 %6, !dbg !36 +} + +attributes #0 = { mustprogress noinline optnone "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" 
"target-features"="+64bit,+a,+c,+d,+f,+i,+m,+relax,+zaamo,+zalrsc,+zca,+zcd,+zicsr,+zifencei,+zmmul,-b,-e,-experimental-p,-experimental-svukte,-experimental-xqccmp,-experimental-xqcia,-experimental-xqciac,-experimental-xqcibi,-experimental-xqcibm,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqciio,-experimental-xqcilb,-experimental-xqcili,-experimental-xqcilia,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisim,-experimental-xqcisls,-experimental-xqcisync,-experimental-xrivosvisni,-experimental-xrivosvizip,-experimental-xsfmclic,-experimental-xsfsclic,-experimental-zalasr,-experimental-zibi,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvfbfa,-experimental-zvfofp8min,-experimental-zvkgs,-experimental-zvqdotq,-h,-q,-sdext,-sdtrig,-sha,-shcounterenw,-shgatpa,-shlcofideleg,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcntrpmf,-smcsrind,-smctr,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssctr,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svadu,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-v,-xandesbfhcvt,-xandesperf,-xandesvbfhcvt,-xandesvdot,-xandesvpackfph,-xandesvsinth,-xandesvsintload,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscbop,-xmipscmov,-xmipsexectl,-xmipslsp,-xsfcease,-xsfmm128t,-xsfmm16t,-xsfmm32a16f,-xsfmm32a32f,-xsfmm32a8f,-xsfmm32a8i,-xsfmm32t,-xsfmm64a64f,-xsfmm64t,-xsfmmbase,-xsfvcp,-xsfvfbfexp16e,-xsfvfexp16e,-xsfvfexp32e,-xsfvfexpa,-xsfvfexpa64e,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xsmtvdot,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zba,-zbb,-zbc,-zbk
b,-zbkc,-zbkx,-zbs,-zcb,-zce,-zcf,-zclsd,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccamoc,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zilsd,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zve32f,-zve32x,-zve64d,-zve64f,-zve64x,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl128b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl32b,-zvl4096b,-zvl512b,-zvl64b,-zvl65536b,-zvl8192b" } +attributes #1 = { mustprogress noinline nounwind optnone "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit,+a,+c,+d,+f,+i,+m,+relax,+zaamo,+zalrsc,+zca,+zcd,+zicsr,+zifencei,+zmmul,-b,-e,-experimental-p,-experimental-svukte,-experimental-xqccmp,-experimental-xqcia,-experimental-xqciac,-experimental-xqcibi,-experimental-xqcibm,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqciio,-experimental-xqcilb,-experimental-xqcili,-experimental-xqcilia,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisim,-experimental-xqcisls,-experimental-xqcisync,-experimental-xrivosvisni,-experimental-xrivosvizip,-experimental-xsfmclic,-experimental-xsfsclic,-experimental-zalasr,-experimental-zibi,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvfbfa,-experimental-zvfofp8min,-experimental-zvkgs,-experimental-zvqdotq,-h,-q,-sdext,-sdtrig,-sha,-shcounterenw,-shgatpa,-shlcofideleg,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcntrpmf,-smcsrind,-smctr,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssctr,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svad
u,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-v,-xandesbfhcvt,-xandesperf,-xandesvbfhcvt,-xandesvdot,-xandesvpackfph,-xandesvsinth,-xandesvsintload,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscbop,-xmipscmov,-xmipsexectl,-xmipslsp,-xsfcease,-xsfmm128t,-xsfmm16t,-xsfmm32a16f,-xsfmm32a32f,-xsfmm32a8f,-xsfmm32a8i,-xsfmm32t,-xsfmm64a64f,-xsfmm64t,-xsfmmbase,-xsfvcp,-xsfvfbfexp16e,-xsfvfexp16e,-xsfvfexp32e,-xsfvfexpa,-xsfvfexpa64e,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xsmtvdot,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zba,-zbb,-zbc,-zbkb,-zbkc,-zbkx,-zbs,-zcb,-zce,-zcf,-zclsd,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccamoc,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zilsd,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zve32f,-zve32x,-zve64d,-zve64f,-zve64x,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl128b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl32b,-zvl4096b,-zvl512b,-zvl64b,-zvl65536b,-zvl8192b" } +attributes #2 = { mustprogress noinline norecurse optnone "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" 
"target-features"="+64bit,+a,+c,+d,+f,+i,+m,+relax,+zaamo,+zalrsc,+zca,+zcd,+zicsr,+zifencei,+zmmul,-b,-e,-experimental-p,-experimental-svukte,-experimental-xqccmp,-experimental-xqcia,-experimental-xqciac,-experimental-xqcibi,-experimental-xqcibm,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqciio,-experimental-xqcilb,-experimental-xqcili,-experimental-xqcilia,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisim,-experimental-xqcisls,-experimental-xqcisync,-experimental-xrivosvisni,-experimental-xrivosvizip,-experimental-xsfmclic,-experimental-xsfsclic,-experimental-zalasr,-experimental-zibi,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvfbfa,-experimental-zvfofp8min,-experimental-zvkgs,-experimental-zvqdotq,-h,-q,-sdext,-sdtrig,-sha,-shcounterenw,-shgatpa,-shlcofideleg,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcntrpmf,-smcsrind,-smctr,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssctr,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svadu,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-v,-xandesbfhcvt,-xandesperf,-xandesvbfhcvt,-xandesvdot,-xandesvpackfph,-xandesvsinth,-xandesvsintload,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscbop,-xmipscmov,-xmipsexectl,-xmipslsp,-xsfcease,-xsfmm128t,-xsfmm16t,-xsfmm32a16f,-xsfmm32a32f,-xsfmm32a8f,-xsfmm32a8i,-xsfmm32t,-xsfmm64a64f,-xsfmm64t,-xsfmmbase,-xsfvcp,-xsfvfbfexp16e,-xsfvfexp16e,-xsfvfexp32e,-xsfvfexpa,-xsfvfexpa64e,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xsmtvdot,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zba,-zbb,-zbc,-zbk
b,-zbkc,-zbkx,-zbs,-zcb,-zce,-zcf,-zclsd,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccamoc,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zilsd,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zve32f,-zve32x,-zve64d,-zve64f,-zve64x,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl128b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl32b,-zvl4096b,-zvl512b,-zvl64b,-zvl65536b,-zvl8192b" } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2, !3, !4, !5, !6, !8, !9} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, isOptimized: false, runtimeVersion: 0, splitDebugFilename: "relax_dwo_ranges.dwo", emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: GNU) +!1 = !DIFile(filename: "relax_dwo_ranges.cpp", directory: "/proc/self/cwd", checksumkind: CSK_MD5, checksum: "50a257b0f63ed1a964aff88c3623bf0a") +!2 = !{i32 7, !"Dwarf Version", i32 5} +!3 = !{i32 2, !"Debug Info Version", i32 3} +!4 = !{i32 1, !"wchar_size", i32 4} +!5 = !{i32 1, !"target-abi", !"lp64d"} +!6 = !{i32 6, !"riscv-isa", !7} +!7 = !{!"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0_zmmul1p0_zaamo1p0_zalrsc1p0_zca1p0_zcd1p0"} +!8 = !{i32 7, !"frame-pointer", i32 2} +!9 = !{i32 8, !"SmallDataLimit", i32 0} +!10 = distinct !DISubprogram(name: "square", linkageName: "_Z6squarei", scope: !1, file: !1, line: 3, type: !11, scopeLine: 3, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !14) +!11 = !DISubroutineType(types: !12) +!12 = !{!13, !13} +!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!14 = !{} +!15 = !DILocalVariable(name: "num", arg: 1, scope: !10, file: !1, line: 3, type: !13) +!16 = !DILocation(line: 3, column: 16, scope: !10) +!17 = !DILocalVariable(name: "num1", scope: !10, 
file: !1, line: 4, type: !13) +!18 = !DILocation(line: 4, column: 7, scope: !10) +!19 = !DILocation(line: 4, column: 14, scope: !10) +!20 = !DILocation(line: 5, column: 10, scope: !10) +!21 = !DILocation(line: 5, column: 17, scope: !10) +!22 = !DILocation(line: 5, column: 15, scope: !10) +!23 = !DILocation(line: 5, column: 3, scope: !10) +!24 = distinct !DISubprogram(name: "boo", linkageName: "_Z3boov", scope: !1, file: !1, line: 8, type: !25, scopeLine: 8, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0) +!25 = !DISubroutineType(types: !26) +!26 = !{!13} +!27 = !DILocation(line: 9, column: 3, scope: !24) +!28 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 12, type: !25, scopeLine: 12, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !14) +!29 = !DILocalVariable(name: "a", scope: !28, file: !1, line: 13, type: !13) +!30 = !DILocation(line: 13, column: 7, scope: !28) +!31 = !DILocalVariable(name: "squared", scope: !28, file: !1, line: 14, type: !13) +!32 = !DILocation(line: 14, column: 7, scope: !28) +!33 = !DILocation(line: 14, column: 24, scope: !28) +!34 = !DILocation(line: 14, column: 17, scope: !28) +!35 = !DILocation(line: 15, column: 10, scope: !28) +!36 = !DILocation(line: 15, column: 3, scope: !28) diff --git a/llvm/test/Instrumentation/BoundsChecking/runtimes.ll b/llvm/test/Instrumentation/BoundsChecking/runtimes.ll index 84dd51cd3fa28..74e1eef7ebe35 100644 --- a/llvm/test/Instrumentation/BoundsChecking/runtimes.ll +++ b/llvm/test/Instrumentation/BoundsChecking/runtimes.ll @@ -8,6 +8,9 @@ ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=RTABORT-NOMERGE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRT-NOMERGE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRTABORT-NOMERGE + +; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRT-PRESERVE-NOMERGE +; RUN: opt < %s 
-passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRTABORT-NOMERGE ; ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=TR-GUARD-COMMON,TR-GUARD-THREE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=TR-GUARD-COMMON,TR-GUARD-THIRTEEN @@ -95,6 +98,22 @@ define void @f1(i64 %x) nounwind { ; RTABORT-NOMERGE-NEXT: call void @__ubsan_handle_local_out_of_bounds_abort() #[[ATTR2:[0-9]+]], !nosanitize [[META0]] ; RTABORT-NOMERGE-NEXT: unreachable, !nosanitize [[META0]] ; +; MINRT-PRESERVE-NOMERGE-LABEL: define void @f1( +; MINRT-PRESERVE-NOMERGE-SAME: i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] { +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP1:%.*]] = mul i64 16, [[X]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8 +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 0, !nosanitize [[META0:![0-9]+]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 16, !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP5:%.*]] = or i1 false, [[TMP4]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP6:%.*]] = or i1 false, [[TMP5]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: br i1 [[TMP6]], label %[[TRAP:.*]], label %[[BB7:.*]] +; MINRT-PRESERVE-NOMERGE: [[BB7]]: +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP8:%.*]] = load i128, ptr [[TMP2]], align 4 +; MINRT-PRESERVE-NOMERGE-NEXT: ret void +; MINRT-PRESERVE-NOMERGE: [[TRAP]]: +; MINRT-PRESERVE-NOMERGE-NEXT: call preserve_allcc void @__ubsan_handle_local_out_of_bounds_minimal_preserve() #[[ATTR1:[0-9]+]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: br label %[[BB7]], !nosanitize [[META0]] +; ; MINRT-NOMERGE-LABEL: define void @f1( ; MINRT-NOMERGE-SAME: i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; MINRT-NOMERGE-NEXT: [[TMP1:%.*]] = mul i64 16, [[X]] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s b/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s index 819ecb866c5ae..ba5159482df50 100644 --- 
a/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s @@ -258,3 +258,12 @@ s_getreg_b32 s0, hwreg(HW_REG_SHADER_CYCLES_LO) s_getreg_b32 s0, hwreg(HW_REG_SHADER_CYCLES_HI) // GFX12: encoding: [0x1e,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCHED_MODE) +// GFX12: encoding: [0x1a,0xf8,0x80,0xb8] + +s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE, 0, 2), s2 +// GFX12: encoding: [0x1a,0x08,0x02,0xb9] + +s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE), 0x2 +// GFX12: encoding: [0x1a,0xf8,0x80,0xb9,0x02,0x00,0x00,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt index 41c5724a596f9..63ad07acee36f 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt @@ -276,3 +276,12 @@ # GFX12: s_getreg_b32 s0, hwreg(HW_REG_SHADER_CYCLES_HI) ; encoding: [0x1e,0xf8,0x80,0xb8] 0x1e,0xf8,0x80,0xb8 + +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCHED_MODE) ; encoding: [0x1a,0xf8,0x80,0xb8] +0x1a,0xf8,0x80,0xb8 + +# GFX12: s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE, 0, 2), s2 ; encoding: [0x1a,0x08,0x02,0xb9] +0x1a,0x08,0x02,0xb9 + +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE), 2 ; encoding: [0x1a,0xf8,0x80,0xb9,0x02,0x00,0x00,0x00] +0x1a,0xf8,0x80,0xb9,0x02,0x00,0x00,0x00 diff --git a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt index b27a50d93f5b9..1024c6b546c4a 100644 --- a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt +++ b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt @@ -298,6 +298,12 @@ #CHECK: mtlpl 3, 4 0x7c,0x80,0x1a,0x26 +#CHECK: paddis 10, 12, 1000000000, 0 +0x06,0x00,0x3b,0x9a,0x3d,0x4c,0xca,0x00 + +#CHECK: paddis 10, 0, 1000000000, 1 +0x06,0x10,0x3b,0x9a,0x3d,0x40,0xca,0x00 + #CHECK: xxmulmul 8, 3, 4, 2 0xed,0x03,0x22,0x08 diff --git 
a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt index 72662d9736740..bda8d1e69442f 100644 --- a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt +++ b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt @@ -292,6 +292,12 @@ #CHECK: mtlpl 3, 4 0x26,0x1a,0x80,0x7c +#CHECK: paddis 10, 12, 1000000000, 0 +0x9a,0x3b,0x00,0x06,0x00,0xca,0x4c,0x3d + +#CHECK: paddis 10, 0, 1000000000, 1 +0x9a,0x3b,0x10,0x06,0x00,0xca,0x40,0x3d + #CHECK: xxmulmul 8, 3, 4, 2 0x08,0x22,0x03,0xed diff --git a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s index ab72649fc3404..eb616a15500f1 100644 --- a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s +++ b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s @@ -419,6 +419,18 @@ #CHECK-BE: mtlpl 3, 4 # encoding: [0x7c,0x80,0x1a,0x26] #CHECK-LE: mtlpl 3, 4 # encoding: [0x26,0x1a,0x80,0x7c] + paddis 10, 12, 1000000000, 0 +#CHECK-BE: paddis 10, 12, 1000000000, 0 # encoding: [0x06,0x00,0x3b,0x9a, +#CHECK-BE-SAME: 0x3d,0x4c,0xca,0x00] +#CHECK-LE: paddis 10, 12, 1000000000, 0 # encoding: [0x9a,0x3b,0x00,0x06, +#CHECK-LE-SAME: 0x00,0xca,0x4c,0x3d] + + paddis 10, 0, 1000000000, 1 +#CHECK-BE: paddis 10, 0, 1000000000, 1 # encoding: [0x06,0x10,0x3b,0x9a, +#CHECK-BE-SAME: 0x3d,0x40,0xca,0x00] +#CHECK-LE: paddis 10, 0, 1000000000, 1 # encoding: [0x9a,0x3b,0x10,0x06, +#CHECK-LE-SAME: 0x00,0xca,0x40,0x3d] + xxmulmul 8, 3, 4, 2 #CHECK-BE: xxmulmul 8, 3, 4, 2 # encoding: [0xed,0x03,0x22,0x08] #CHECK-LE: xxmulmul 8, 3, 4, 2 # encoding: [0x08,0x22,0x03,0xed] diff --git a/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s b/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s deleted file mode 100644 index 69cdb5cb75ebb..0000000000000 --- a/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s +++ /dev/null @@ -1,71 +0,0 @@ -# RUN: not llvm-mc -triple powerpc64-unknown-unknown < %s 2> %t -# RUN: FileCheck < %t %s -# 
RUN: not llvm-mc -triple powerpc64le-unknown-unknown < %s 2> %t -# RUN: FileCheck < %t %s - - # CHECK: error: invalid operand for instruction -paddi 1, 1, 32, 1 - -# CHECK: error: invalid operand for instruction -pld 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -paddi 1, 1, 32, 1 - -# CHECK: error: invalid operand for instruction -plbz 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plfd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plfs 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plha 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plhz 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plwa 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plwz 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxsd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxssp 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxv 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstb 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstfd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstfs 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -psth 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstw 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxsd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxssp 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxv 1, 32(1), 1 - diff --git a/llvm/test/MC/PowerPC/ppc64-errors.s b/llvm/test/MC/PowerPC/ppc64-errors.s index 17905a396885a..4d4da58f650fe 100644 --- a/llvm/test/MC/PowerPC/ppc64-errors.s +++ b/llvm/test/MC/PowerPC/ppc64-errors.s @@ -4,6 +4,76 @@ # RUN: not llvm-mc -triple powerpc64le-unknown-unknown < %s 2> %t # RUN: FileCheck < %t %s +# From ISAFuture + +# CHECK: error: invalid operand 
for instruction +paddis 10, 5, 1000000000, 1 + +# From ISA31 + +# CHECK: error: invalid operand for instruction +paddi 1, 1, 32, 1 + +# CHECK: error: invalid operand for instruction +pld 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plbz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plfd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plfs 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plha 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plhz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plwa 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plwz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plxsd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plxssp 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plxv 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstb 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstfd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstfs 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +psth 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstw 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxsd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxssp 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxv 1, 32(1), 1 + # Register operands # CHECK: error: invalid operand for instruction diff --git a/llvm/test/MC/RISCV/corev/XCVelw-pseudo.s b/llvm/test/MC/RISCV/corev/XCVelw-pseudo.s new file mode 100644 index 0000000000000..172ebfde9f338 --- /dev/null +++ b/llvm/test/MC/RISCV/corev/XCVelw-pseudo.s @@ -0,0 +1,11 @@ +# RUN: llvm-mc %s -triple=riscv32 --mattr=+xcvelw | FileCheck %s + +# CHECK: .Lpcrel_hi0: +# CHECK: auipc a2, %pcrel_hi(a_symbol) +# CHECK: cv.elw a2, 
%pcrel_lo(.Lpcrel_hi0)(a2) +cv.elw a2, a_symbol + +# CHECK: .Lpcrel_hi1: +# CHECK: auipc a3, %pcrel_hi(a_symbol) +# CHECK: cv.elw a3, %pcrel_lo(.Lpcrel_hi1)(a3) +cv.elw a3, a_symbol diff --git a/llvm/test/TableGen/DuplicateFieldValues.td b/llvm/test/TableGen/DuplicateFieldValues.td index 50c77fa88ccec..85cb5bbfb6c56 100644 --- a/llvm/test/TableGen/DuplicateFieldValues.td +++ b/llvm/test/TableGen/DuplicateFieldValues.td @@ -82,3 +82,4 @@ let BaseName = "0" in { def E0 : I, ABCRel, isEForm; } +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/RegClassByHwMode.td b/llvm/test/TableGen/RegClassByHwMode.td index a21a396f7fd52..ec723f8b70478 100644 --- a/llvm/test/TableGen/RegClassByHwMode.td +++ b/llvm/test/TableGen/RegClassByHwMode.td @@ -13,6 +13,7 @@ include "llvm/Target/Target.td" // INSTRINFO-EMPTY: // INSTRINFO-NEXT: enum { // INSTRINFO-NEXT: PHI +// INSTRINFO: LOAD_STACK_GUARD = [[LOAD_STACK_GUARD_OPCODE:[0-9]+]] // INSTRINFO: }; // INSTRINFO: enum RegClassByHwModeUses : uint16_t { // INSTRINFO-NEXT: MyPtrRC, @@ -22,10 +23,20 @@ include "llvm/Target/Target.td" // INSTRINFO-EMPTY: // INSTRINFO-NEXT: } // namespace llvm::MyTarget + +// INSTRINFO: { [[LOAD_STACK_GUARD_OPCODE]], 1, 1, 0, 0, 0, 0, [[LOAD_STACK_GUARD_OP_INDEX:[0-9]+]], MyTargetImpOpBase + 0, 0|(1ULL<; +defm : RemapAllTargetPseudoPointerOperands; + def MyTargetISA : InstrInfo; def MyTarget : Target { let InstructionSet = MyTargetISA; } diff --git a/llvm/test/TableGen/def-multiple-operands.td b/llvm/test/TableGen/def-multiple-operands.td index 5d215056920e8..dc5ea09eff9ba 100644 --- a/llvm/test/TableGen/def-multiple-operands.td +++ b/llvm/test/TableGen/def-multiple-operands.td @@ -35,3 +35,5 @@ def InstA : Instruction { field bits<8> SoftFail = 0; let hasSideEffects = false; } + +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/get-named-operand-idx.td b/llvm/test/TableGen/get-named-operand-idx.td index b3569510dd6fc..7982822c0a895 100644 --- 
a/llvm/test/TableGen/get-named-operand-idx.td +++ b/llvm/test/TableGen/get-named-operand-idx.td @@ -48,6 +48,8 @@ def InstD : InstBase { let UseNamedOperandTable = 0; } +defm : RemapAllTargetPseudoPointerOperands; + // CHECK-LABEL: #ifdef GET_INSTRINFO_OPERAND_ENUM // CHECK-NEXT: #undef GET_INSTRINFO_OPERAND_ENUM // CHECK-EMPTY: diff --git a/llvm/test/TableGen/get-operand-type-no-expand.td b/llvm/test/TableGen/get-operand-type-no-expand.td index a0a8fa957f9b6..fcaf3684528b2 100644 --- a/llvm/test/TableGen/get-operand-type-no-expand.td +++ b/llvm/test/TableGen/get-operand-type-no-expand.td @@ -46,3 +46,5 @@ def InstA : Instruction { // CHECK-NOEXPAND: /* InstA */ // CHECK-NOEXPAND-NEXT: i512complex, i8complex, i32imm, // CHECK-NOEXPAND: #endif // GET_INSTRINFO_OPERAND_TYPE + +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/get-operand-type.td b/llvm/test/TableGen/get-operand-type.td index b2f63cafd6a89..49fbb63ac5974 100644 --- a/llvm/test/TableGen/get-operand-type.td +++ b/llvm/test/TableGen/get-operand-type.td @@ -18,6 +18,8 @@ def OpB : Operand; def RegOp : RegisterOperand; +defm : RemapAllTargetPseudoPointerOperands; + def InstA : Instruction { let Size = 1; let OutOperandList = (outs OpA:$a); diff --git a/llvm/test/TableGen/target-specialized-pseudos.td b/llvm/test/TableGen/target-specialized-pseudos.td index 99c63f3ec29d9..3953a36101fe0 100644 --- a/llvm/test/TableGen/target-specialized-pseudos.td +++ b/llvm/test/TableGen/target-specialized-pseudos.td @@ -1,6 +1,11 @@ -// RUN: llvm-tblgen -gen-instr-info -I %p/../../include %s -DONECASE -o - | FileCheck -check-prefixes=CHECK,ONECASE %s // RUN: llvm-tblgen -gen-instr-info -I %p/../../include %s -DALLCASES -o - | FileCheck -check-prefixes=CHECK,ALLCASES %s -// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DERROR -o /dev/null 2>&1 | FileCheck -check-prefix=ERROR %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DONECASE -o /dev/null 2>&1 | FileCheck 
-check-prefixes=ERROR-MISSING %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DMULTIPLE_OVERRIDE_ERROR -o /dev/null 2>&1 | FileCheck -implicit-check-not=error: -check-prefix=MULTIPLE-OVERRIDE-ERROR %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DALLCASES -DERROR_NONPSEUDO -o /dev/null 2>&1 | FileCheck -implicit-check-not=error: -check-prefix=ERROR-NONPSEUDO %s + + +// def PREALLOCATED_ARG : StandardPseudoInstruction { + // CHECK: namespace llvm::MyTarget { // CHECK: enum { @@ -20,8 +25,6 @@ // CHECK-NEXT: { [[MY_MOV_OPCODE]], 2, 1, 2, 0, 0, 0, {{[0-9]+}}, MyTargetImpOpBase + 0, 0|(1ULL<; #endif -#ifdef ERROR +#ifdef MULTIPLE_OVERRIDE_ERROR def MY_LOAD_STACK_GUARD_0 : TargetSpecializedStandardPseudoInstruction; -// ERROR: :[[@LINE+1]]:5: error: multiple overrides of 'LOAD_STACK_GUARD' defined +// MULTIPLE-OVERRIDE-ERROR: :[[@LINE+1]]:5: error: multiple overrides of 'LOAD_STACK_GUARD' defined def MY_LOAD_STACK_GUARD_1 : TargetSpecializedStandardPseudoInstruction; #endif +#ifdef ERROR_NONPSEUDO + +// FIXME: Double error +// ERROR-NONPSEUDO: [[@LINE+2]]:5: error: non-pseudoinstruction user of PointerLikeRegClass +// ERROR-NONPSEUDO: [[@LINE+1]]:5: error: non-pseudoinstruction user of PointerLikeRegClass +def NON_PSEUDO : TestInstruction { + let OutOperandList = (outs XRegs:$dst); + let InOperandList = (ins ptr_rc:$src); + let AsmString = "non_pseudo $dst, $src"; +} + +#endif + def MY_MOV : TestInstruction { let OutOperandList = (outs XRegs:$dst); let InOperandList = (ins XRegs:$src); diff --git a/llvm/test/Transforms/AggressiveInstCombine/umulh_carry.ll b/llvm/test/Transforms/AggressiveInstCombine/umulh_carry.ll new file mode 100644 index 0000000000000..b78095cac0df9 --- /dev/null +++ b/llvm/test/Transforms/AggressiveInstCombine/umulh_carry.ll @@ -0,0 +1,755 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=aggressive-instcombine,instcombine -S | 
FileCheck %s + +; Carry variant of mul-high. https://alive2.llvm.org/ce/z/G2bD6o +define i32 @mul_carry(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +; Carry variant of mul-high. 
https://alive2.llvm.org/ce/z/G2bD6o +define i128 @mul_carry_i128(i128 %x, i128 %y) { +; CHECK-LABEL: define i128 @mul_carry_i128( +; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i128 [[X]] to i256 +; CHECK-NEXT: [[TMP1:%.*]] = zext i128 [[Y]] to i256 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i256 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i256 [[TMP2]], 128 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i256 [[TMP3]] to i128 +; CHECK-NEXT: ret i128 [[ADD11]] +; +entry: + %shr = lshr i128 %x, 64 + %and = and i128 %x, u0xffffffffffffffff + %shr1 = lshr i128 %y, 64 + %and2 = and i128 %y, u0xffffffffffffffff + %mul = mul nuw i128 %shr, %and2 + %mul3 = mul nuw i128 %and, %shr1 + %add = add i128 %mul, %mul3 + %mul4 = mul nuw i128 %and, %and2 + %shr5 = lshr i128 %mul4, 64 + %add6 = add i128 %add, %shr5 + %cmp = icmp ult i128 %add6, %mul + %cond = select i1 %cmp, i128 u0x10000000000000000, i128 0 + %mul8 = mul nuw i128 %shr, %shr1 + %add9 = add nuw i128 %mul8, %cond + %shr10 = lshr i128 %add6, 64 + %add11 = add i128 %add9, %shr10 + ret i128 %add11 +} + +; Carry variant of mul-high. 
https://alive2.llvm.org/ce/z/G2bD6o +define <4 x i32> @mul_carry_v4i32(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: define <4 x i32> @mul_carry_v4i32( +; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext <4 x i32> [[X]] to <4 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i32> [[Y]] to <4 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw <4 x i64> [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr <4 x i64> [[TMP2]], splat (i64 32) +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw <4 x i64> [[TMP3]] to <4 x i32> +; CHECK-NEXT: ret <4 x i32> [[ADD11]] +; +entry: + %shr = lshr <4 x i32> %x, + %and = and <4 x i32> %x, + %shr1 = lshr <4 x i32> %y, + %and2 = and <4 x i32> %y, + %mul = mul nuw <4 x i32> %shr, %and2 + %mul3 = mul nuw <4 x i32> %and, %shr1 + %add = add <4 x i32> %mul, %mul3 + %mul4 = mul nuw <4 x i32> %and, %and2 + %shr5 = lshr <4 x i32> %mul4, + %add6 = add <4 x i32> %add, %shr5 + %cmp = icmp ult <4 x i32> %add6, %mul + %cond = select <4 x i1> %cmp, <4 x i32> , <4 x i32> zeroinitializer + %mul8 = mul nuw <4 x i32> %shr, %shr1 + %add9 = add nuw <4 x i32> %mul8, %cond + %shr10 = lshr <4 x i32> %add6, + %add11 = add <4 x i32> %add9, %shr10 + ret <4 x i32> %add11 +} + +; Check carry against xlyh, not xhyl +define i32 @mul_carry_xlyh(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_xlyh( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw 
i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul3 + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +define i32 @mul_carry_comm(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_comm( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %and2, %shr + %mul3 = mul nuw i32 %shr1, %and + %add = add i32 %mul3, %mul + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %shr5, %add + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %shr10 = lshr i32 %add6, 16 + %add9 = add nuw i32 %cond, %shr10 + %add11 = add i32 %add9, %mul8 + ret i32 %add11 +} + + +; Negative tests + + +define i32 @mul_carry_notxlo(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_notxlo( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 32767 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw nsw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw nsw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 
[[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 32767 ; wrong mask + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +define i32 @mul_carry_notyhi(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_notyhi( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 14 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = 
lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 14 ; wring shift + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +define i32 @mul_carry_notcarry(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_notcarry( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 65536 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul 
nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 0, i32 65536 ; backwards + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +define i32 @mul_carry_notlolo(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_notlolo( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +define i32 @mul_carry_nothihi(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_nothihi( +; 
CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL4]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul4, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + ret i32 %add11 +} + +; Extra uses +define i32 @mul_carry_use_carry(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_carry( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; 
CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[COND]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %cond) + ret i32 %add11 +} + +define i32 @mul_carry_use_mulhi(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_mulhi( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MUL8]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %mul8) + ret i32 %add11 +} + +define i32 @mul_carry_use_llh(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_llh( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[ADD6:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[SHR10]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %shr5) + ret i32 %add11 +} + +define i32 @mul_carry_use_mulll(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_mulll( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD11:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MUL4]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %mul4) + ret i32 %add11 +} + +define i32 @mul_carry_use_mullh(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_mullh( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MUL3]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %mul3) + ret i32 %add11 +} + +define i32 @mul_carry_use_mulhl(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_mulhl( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MUL]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %mul) + ret i32 %add11 +} + +define i32 @mul_carry_use_crosssum(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_crosssum( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[ADD9:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[SHR10:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD11]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[ADD9]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD10:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR11:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[ADD10]], [[SHR11]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[ADD11]]) +; CHECK-NEXT: ret i32 [[TMP4]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %add) + ret i32 %add11 +} + +define i32 @mul_carry_use_lowaccumhi(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_lowaccumhi( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[ADD6:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD7:%.*]] = add i32 [[ADD]], [[SHR10]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD7]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR11:%.*]] = lshr i32 [[ADD7]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR11]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[SHR11]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %shr10) + ret i32 %add11 +} + +define i32 @mul_carry_use_lowaccum(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_carry_use_lowaccum( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[SHR1:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[SHR]], [[AND2]] +; CHECK-NEXT: [[MUL3:%.*]] = mul nuw i32 [[AND]], [[SHR1]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[MUL3]] +; CHECK-NEXT: [[MUL4:%.*]] = mul nuw i32 [[AND]], [[AND2]] +; CHECK-NEXT: [[SHR5:%.*]] = lshr i32 [[MUL4]], 16 +; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[ADD]], [[SHR5]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD6]], [[MUL]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 65536, i32 0 +; CHECK-NEXT: [[MUL8:%.*]] = mul nuw i32 [[SHR]], [[SHR1]] +; CHECK-NEXT: [[ADD9:%.*]] = add nuw i32 [[MUL8]], [[COND]] +; CHECK-NEXT: [[SHR10:%.*]] = lshr i32 [[ADD6]], 16 +; CHECK-NEXT: [[ADD11:%.*]] = add i32 [[ADD9]], [[SHR10]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[ADD6]]) +; CHECK-NEXT: ret i32 [[ADD11]] +; +entry: + %shr = lshr i32 %x, 16 + %and = and i32 %x, 65535 + %shr1 = lshr i32 %y, 16 + %and2 = and i32 %y, 65535 + %mul = mul nuw i32 %shr, %and2 + %mul3 = mul nuw i32 %and, %shr1 + %add = add i32 %mul, %mul3 + %mul4 = mul nuw i32 %and, %and2 + %shr5 = lshr i32 %mul4, 16 + %add6 = add i32 %add, %shr5 + %cmp = icmp ult i32 %add6, %mul + %cond = select i1 %cmp, i32 65536, i32 0 + %mul8 = mul nuw i32 %shr, %shr1 + %add9 = add nuw i32 %mul8, %cond + %shr10 = lshr i32 %add6, 16 + %add11 = add i32 %add9, %shr10 + call void (...) 
@llvm.fake.use(i32 %add6) + ret i32 %add11 +} diff --git a/llvm/test/Transforms/AggressiveInstCombine/umulh_carry4.ll b/llvm/test/Transforms/AggressiveInstCombine/umulh_carry4.ll new file mode 100644 index 0000000000000..fa21721f17762 --- /dev/null +++ b/llvm/test/Transforms/AggressiveInstCombine/umulh_carry4.ll @@ -0,0 +1,3019 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=aggressive-instcombine,instcombine -S | FileCheck %s + +; https://alive2.llvm.org/ce/z/KuJPnU +define i64 @umulh(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP4:%.*]] = trunc nuw i128 [[TMP5]] to i64 +; CHECK-NEXT: ret i64 [[TMP4]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw 
i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; Commutative ops should match in any order. Ops where operand order has been +; reversed from above are marked 'commuted'. As per instcombine contributors +; guide, constants are always canonicalized to RHS, so don't bother commuting +; constants. +define i64 @umulh__commuted(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__commuted( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP4:%.*]] = trunc nuw i128 [[TMP5]] to i64 +; CHECK-NEXT: ret i64 [[TMP4]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %x_hi, %y_lo ; commuted + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %x_lo, %y_hi ; commuted + %y_lo_x_lo = mul nuw i64 %x_lo, %y_lo ; commuted + + ; Add cross terms + %cross_sum = add i64 %y_lo_x_hi, %y_hi_x_lo ; commuted + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %y_lo_x_lo_hi, %cross_sum_lo ; commuted + + ; Final result accumulation + %intermediate = add nuw i64 %y_hi_x_hi, %cross_sum_hi ; commuted + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 
%carry, %intermediate ; commuted + %hw64 = add i64 %low_accum_hi, %intermediate_plus_carry ; commuted + + ret i64 %hw64 +} + +define i32 @mulh_src32(i32 %x, i32 %y) { + ; Extract low and high 16 bits +; CHECK-LABEL: define i32 @mulh_src32( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i64 [[TMP3]], 32 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i64 [[TMP4]] to i32 +; CHECK-NEXT: ret i32 [[TMP5]] +; + %x_lo = and i32 %x, u0xffff ; x & 0xffffffff + %y_lo = and i32 %y, u0xffff ; y & 0xffffffff + %x_hi = lshr i32 %x, 16 ; x >> 16 + %y_hi = lshr i32 %y, 16 ; y >> 16 + + ; Cross products + %y_lo_x_hi = mul nuw i32 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i32 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i32 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i32 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i32 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i32 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i32 u0x10000, i32 0 ; if overflow, add 1 << 16 + + ; High 16 bits of low product + %y_lo_x_lo_hi = lshr i32 %y_lo_x_lo, 16 + + ; Low and high 16 bits of cross_sum + %cross_sum_lo = and i32 %cross_sum, u0xffff + %cross_sum_hi = lshr i32 %cross_sum, 16 + + %low_accum = add nuw nsw i32 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i32 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i32 %low_accum, 16 + %intermediate_plus_carry = add i32 %intermediate, %carry + %hw64 = add i32 %intermediate_plus_carry, %low_accum_hi + + ret i32 %hw64 +} + +define i128 @mulh_src128(i128 %x, i128 %y) { + ; Extract low and high 64 bits +; CHECK-LABEL: define i128 @mulh_src128( +; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i128 [[X]] to 
i256 +; CHECK-NEXT: [[TMP2:%.*]] = zext i128 [[Y]] to i256 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i256 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i256 [[TMP3]], 128 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i256 [[TMP4]] to i128 +; CHECK-NEXT: ret i128 [[HW64]] +; + %x_lo = and i128 %x, u0xffffffffffffffff ; x & 0xffffffff + %y_lo = and i128 %y, u0xffffffffffffffff ; y & 0xffffffff + %x_hi = lshr i128 %x, 64 ; x >> 16 + %y_hi = lshr i128 %y, 64 ; y >> 16 + + ; Cross products + %y_lo_x_hi = mul nuw i128 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i128 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i128 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i128 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i128 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i128 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i128 u0x10000000000000000, i128 0 ; if overflow, add 1 << 16 + + ; High 16 bits of low product + %y_lo_x_lo_hi = lshr i128 %y_lo_x_lo, 64 + + ; Low and high 16 bits of cross_sum + %cross_sum_lo = and i128 %cross_sum, u0xffffffffffffffff + %cross_sum_hi = lshr i128 %cross_sum, 64 + + %low_accum = add nuw nsw i128 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i128 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i128 %low_accum, 64 + %intermediate_plus_carry = add i128 %intermediate, %carry + %hw64 = add i128 %intermediate_plus_carry, %low_accum_hi + + ret i128 %hw64 +} + +define <2 x i32> @mulh_v2i32(<2 x i32> %x, <2 x i32> %y) { + ; Extract low and high 16 bits +; CHECK-LABEL: define <2 x i32> @mulh_v2i32( +; CHECK-SAME: <2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[X]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[Y]] to <2 x i64> +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw <2 x i64> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr <2 x i64> [[TMP3]], splat (i64 32) +; 
CHECK-NEXT: [[HW64:%.*]] = trunc nuw <2 x i64> [[TMP4]] to <2 x i32> +; CHECK-NEXT: ret <2 x i32> [[HW64]] +; + %x_lo = and <2 x i32> %x, + %y_lo = and <2 x i32> %y, + %x_hi = lshr <2 x i32> %x, + %y_hi = lshr <2 x i32> %y, + + ; Cross products + %y_lo_x_hi = mul nuw <2 x i32> %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw <2 x i32> %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw <2 x i32> %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw <2 x i32> %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add <2 x i32> %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult <2 x i32> %cross_sum, %y_lo_x_hi + %carry = select <2 x i1> %carry_out, <2 x i32> , <2 x i32> + + ; High 16 bits of low product + %y_lo_x_lo_hi = lshr <2 x i32> %y_lo_x_lo, + + ; Low and high 16 bits of cross_sum + %cross_sum_lo = and <2 x i32> %cross_sum, + %cross_sum_hi = lshr <2 x i32> %cross_sum, + + %low_accum = add nuw nsw <2 x i32> %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw <2 x i32> %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr <2 x i32> %low_accum, + %intermediate_plus_carry = add <2 x i32> %intermediate, %carry + %hw64 = add <2 x i32> %intermediate_plus_carry, %low_accum_hi + + ret <2 x i32> %hw64 +} + +; https://alive2.llvm.org/ce/z/PPXtkR +define void @full_mul_int128(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP4:%.*]] = trunc nuw i128 [[TMP5]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[TMP4]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: store i64 
[[TMP8]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + ; Store high 64 bits + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + ; Reconstruct low 64 bits + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + ; Store low 64 bits + store i64 %lw64, ptr %p, align 8 + + ret void +} + + +; Negative tests + +define i64 @umulh_notandx(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notandx( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967294 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; 
CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967294 ; x & 0xfffffffe + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = 
lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notandy(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notandy( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967294 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967294 ; y & 0xfffffffe + %x_hi = lshr i64 %x, 32 ; x >> 32 
+ %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notshiftx(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notshiftx( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 16 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = 
and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 16 ; x >> 16 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notshifty(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notshifty( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 
[[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 16 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 16 ; y >> 16 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 
%y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notcarry(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notcarry( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967295, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = 
and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967295, i64 0 ; if overflow, add wrong value + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notxlo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notxlo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 
4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x ; y_lo * x + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +define i64 @umulh_notcrosssum(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_notcrosssum( +; CHECK-SAME: i64 
[[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = shl i64 [[Y_HI_X_LO]], 1 +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967294 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_hi_x_lo ; wrong crosssum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 
4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + + + +; Uses tests. + +; 'x_lo' can have more than 2 uses. +define i64 @umulh__mul_use__x_lo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[X_LO]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + call void (...) 
@llvm.fake.use(i64 %x_lo) + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_hi' can have more than 2 uses. +define i64 @umulh__mul_use__y_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + call void (...) @llvm.fake.use(i64 %y_hi) + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_hi * x_hi' must have no more than 2 uses. 
+define i64 @umulh__mul_use__y_lo_x_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_lo_x_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[Y_LO_X_HI]]) +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + call void (...) 
@llvm.fake.use(i64 %y_lo_x_hi) + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_hi * x_hi' must have single use. +define i64 @umulh__mul_use__y_hi_x_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_hi_x_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI_X_HI]]) +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + call void (...) 
@llvm.fake.use(i64 %y_hi_x_hi) + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_hi * x_lo' must have single use. +define i64 @umulh__mul_use__y_hi_x_lo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_hi_x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI_X_LO]]) +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + call void (...) 
@llvm.fake.use(i64 %y_hi_x_lo) + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_lo * x_lo' has a single use if only doing high part of multiply and 2 uses +; when doing both low/high parts. Doing the optimization when only doing the +; high part and there's a 2nd unrelated use here still results in less +; instructions and is likely profitable, so this seems ok. +define i64 @umulh__mul_use__y_lo_x_lo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_lo_x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_LO_X_LO]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[TMP5]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + call void (...) @llvm.fake.use(i64 %y_lo_x_lo) + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'cross_sum' must have no more than 3 uses. 
+define i64 @umulh__mul_use__cross_sum(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__cross_sum( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[CROSS_SUM]]) +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo 
* x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + call void (...) @llvm.fake.use(i64 %cross_sum) + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'carry_out' must have single use. +define i64 @umulh__mul_use__carry_out(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__carry_out( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i1 [[CARRY_OUT]]) +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + call void (...) 
@llvm.fake.use(i1 %carry_out) + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'carry' must have single use. +define i64 @umulh__mul_use__carry(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__carry( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[CARRY]]) +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + call void (...) @llvm.fake.use(i64 %carry) + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'y_lo_x_lo_hi' must have single use. 
+define i64 @umulh__mul_use__y_lo_x_lo_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__y_lo_x_lo_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[Y_LO_X_LO_HI]]) +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, 
%x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + call void (...) @llvm.fake.use(i64 %y_lo_x_lo_hi) + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'cross_sum_lo' must have single use. +define i64 @umulh__mul_use__cross_sum_lo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__cross_sum_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[CROSS_SUM_LO]]) +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + call void (...) @llvm.fake.use(i64 %cross_sum_lo) + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'cross_sum_hi' must have single use. 
+define i64 @umulh__mul_use__cross_sum_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__cross_sum_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[CROSS_SUM_HI]]) +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + call void (...) @llvm.fake.use(i64 %cross_sum_hi) + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'low_accum' has a single use if only doing high part of multiply and 2 uses +; when doing both low/high parts. Unrelated use here, but still seems +; profitable. 
+define i64 @umulh__mul_use__low_accum(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__low_accum( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul i64 [[Y]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul i64 [[Y_HI]], [[X]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[LOW_ACCUM]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[TMP5]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; 
Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + call void (...) @llvm.fake.use(i64 %low_accum) + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'intermediate' must have single use. +define i64 @umulh__mul_use__intermediate(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__intermediate( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[INTERMEDIATE]]) +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + call void (...) @llvm.fake.use(i64 %intermediate) + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'low_accum_hi' must have single use. 
+define i64 @umulh__mul_use__low_accum_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__low_accum_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[LOW_ACCUM_HI]]) +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + call void (...) @llvm.fake.use(i64 %low_accum_hi) + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + + ret i64 %hw64 +} + +; 'intermediate_plus_carry' must have single use. 
+define i64 @umulh__mul_use__intermediate_plus_carry(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh__mul_use__intermediate_plus_carry( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[INTERMEDIATE:%.*]] = add nuw i64 [[CROSS_SUM_HI]], [[Y_HI_X_HI]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[INTERMEDIATE_PLUS_CARRY:%.*]] = add i64 [[INTERMEDIATE]], [[CARRY]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[INTERMEDIATE_PLUS_CARRY]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[INTERMEDIATE_PLUS_CARRY]]) +; CHECK-NEXT: ret i64 [[HW64]] +; + ; Extract low and high 32 bits + %x_lo = and i64 %x, 4294967295 ; x & 0xffffffff + %y_lo = and i64 %y, 4294967295 ; y & 0xffffffff + %x_hi = lshr i64 %x, 32 ; x >> 32 + %y_hi = lshr i64 %y, 32 ; y >> 32 + + ; Cross products + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi ; y_lo * x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi ; y_hi * x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo ; y_hi * x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo ; y_lo * x_lo + + ; Add cross terms + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi ; full 64-bit sum + + ; Carry if overflowed + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 ; if overflow, add 1 << 32 + + ; High 32 bits of low product + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + ; Low and high 32 bits of cross_sum + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + ; Final result accumulation + %intermediate = add nuw i64 %cross_sum_hi, %y_hi_x_hi + %low_accum_hi = lshr i64 %low_accum, 32 + %intermediate_plus_carry = add i64 %intermediate, %carry + %hw64 = add i64 %intermediate_plus_carry, %low_accum_hi + call void (...) @llvm.fake.use(i64 %intermediate_plus_carry) + + ret i64 %hw64 +} + + +; 'x_lo' can have multiple uses. +define void @full_mul_int128__mul_use__x_lo(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[X_LO]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LW64:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + call void (...) @llvm.fake.use(i64 %x_lo) + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_lo' can have multiple uses. 
+define void @full_mul_int128__mul_use__y_lo(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[Y_LO]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LW64:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + call void (...) @llvm.fake.use(i64 %y_lo) + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo 
+ + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'x_hi' can have multiple uses. +define void @full_mul_int128__mul_use__x_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__x_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[X_HI]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LW64:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + call void (...) 
@llvm.fake.use(i64 %x_hi) + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_hi' can have multiple uses. +define void @full_mul_int128__mul_use__y_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LW64:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + call void (...) @llvm.fake.use(i64 %y_hi) + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_lo_x_hi' must have exactly 2 uses. 
+define void @full_mul_int128__mul_use__y_lo_x_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_lo_x_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[Y_LO_X_HI]]) +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: 
ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + call void (...) @llvm.fake.use(i64 %y_lo_x_hi) + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_hi_x_hi' must have single use. +define void @full_mul_int128__mul_use__y_hi_x_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_hi_x_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI_X_HI]]) +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + call void (...) 
@llvm.fake.use(i64 %y_hi_x_hi) + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_hi_x_lo' must have single use. +define void @full_mul_int128__mul_use__y_hi_x_lo(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_hi_x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_HI_X_LO]]) +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + call void (...) 
@llvm.fake.use(i64 %y_hi_x_lo) + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_lo_x_lo' we allow multiple uses on y_lo_x_lo. +; TODO does not simplify like it should? +define void @full_mul_int128__mul_use__y_lo_x_lo(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_lo_x_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = mul i64 [[Y]], [[X_HI]] +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = mul i64 [[Y_HI]], [[X]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_LO_X_LO]]) +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[TMP5]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM1:%.*]] = shl i64 [[TMP6]], 32 +; CHECK-NEXT: [[LW64:%.*]] = add i64 [[Y_LO_X_LO]], [[LOW_ACCUM1]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + call void (...) @llvm.fake.use(i64 %y_lo_x_lo) + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'cross_sum' must have no more than 3 uses. 
+define void @full_mul_int128__mul_use__cross_sum(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__cross_sum( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[CROSS_SUM]]) +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: 
ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + call void (...) @llvm.fake.use(i64 %cross_sum) + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'carry_out' must have single use. 
+define void @full_mul_int128__mul_use__carry_out(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__carry_out( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i1 [[CARRY_OUT]]) +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8 +; CHECK-NEXT: ret 
void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + call void (...) @llvm.fake.use(i1 %carry_out) + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'carry' must have single use. 
+define void @full_mul_int128__mul_use__carry(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__carry( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[CARRY]]) +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + 
%x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + call void (...) @llvm.fake.use(i64 %carry) + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'y_lo_x_lo_hi' must have single use. 
+define void @full_mul_int128__mul_use__y_lo_x_lo_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__y_lo_x_lo_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[Y_LO_X_LO_HI]]) +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + call void (...) 
@llvm.fake.use(i64 %y_lo_x_lo_hi) + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'cross_sum_lo' must have single use. +define void @full_mul_int128__mul_use__cross_sum_lo(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__cross_sum_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[CROSS_SUM_LO]]) +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + call void (...) 
@llvm.fake.use(i64 %cross_sum_lo) + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'cross_sum_hi' must have single use. +define void @full_mul_int128__mul_use__cross_sum_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__cross_sum_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[CROSS_SUM_HI]]) +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + call void (...) 
@llvm.fake.use(i64 %cross_sum_hi) + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'low_accum' must have exactly 2 uses if doing high multiply. +define void @full_mul_int128__mul_use__low_accum(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__low_accum( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[LOW_ACCUM]]) +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[LW64:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + call void (...) 
@llvm.fake.use(i64 %low_accum) + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'upper_mid' must have single use. +define void @full_mul_int128__mul_use__upper_mid(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__upper_mid( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[UPPER_MID]]) +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[TMP5]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP9:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP9]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + call void (...) @llvm.fake.use(i64 %upper_mid) + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'low_accum_hi' must have single use. 
+define void @full_mul_int128__mul_use__low_accum_hi(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__low_accum_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]] +; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0 +; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32 +; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295 +; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32 +; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]] +; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]] +; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[LOW_ACCUM_HI]]) +; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]] +; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]] +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32 +; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]] +; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + call void (...) @llvm.fake.use(i64 %low_accum_hi) + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + +; 'upper_mid_with_cross' must have single use. 
+define void @full_mul_int128__mul_use__upper_mid_with_cross(i64 %x, i64 %y, ptr %p) {
+; CHECK-LABEL: define void @full_mul_int128__mul_use__upper_mid_with_cross(
+; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295
+; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295
+; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32
+; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32
+; CHECK-NEXT: [[Y_LO_X_HI:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]]
+; CHECK-NEXT: [[Y_HI_X_HI:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]]
+; CHECK-NEXT: [[Y_HI_X_LO:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]]
+; CHECK-NEXT: [[Y_LO_X_LO:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]]
+; CHECK-NEXT: [[CROSS_SUM:%.*]] = add i64 [[Y_HI_X_LO]], [[Y_LO_X_HI]]
+; CHECK-NEXT: [[CARRY_OUT:%.*]] = icmp ult i64 [[CROSS_SUM]], [[Y_LO_X_HI]]
+; CHECK-NEXT: [[CARRY:%.*]] = select i1 [[CARRY_OUT]], i64 4294967296, i64 0
+; CHECK-NEXT: [[Y_LO_X_LO_HI:%.*]] = lshr i64 [[Y_LO_X_LO]], 32
+; CHECK-NEXT: [[CROSS_SUM_LO:%.*]] = and i64 [[CROSS_SUM]], 4294967295
+; CHECK-NEXT: [[CROSS_SUM_HI:%.*]] = lshr i64 [[CROSS_SUM]], 32
+; CHECK-NEXT: [[LOW_ACCUM:%.*]] = add nuw nsw i64 [[CROSS_SUM_LO]], [[Y_LO_X_LO_HI]]
+; CHECK-NEXT: [[UPPER_MID:%.*]] = add nuw i64 [[Y_HI_X_HI]], [[CARRY]]
+; CHECK-NEXT: [[LOW_ACCUM_HI:%.*]] = lshr i64 [[LOW_ACCUM]], 32
+; CHECK-NEXT: [[UPPER_MID_WITH_CROSS:%.*]] = add i64 [[UPPER_MID]], [[CROSS_SUM_HI]]
+; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[UPPER_MID_WITH_CROSS]])
+; CHECK-NEXT: [[HW64:%.*]] = add i64 [[UPPER_MID_WITH_CROSS]], [[LOW_ACCUM_HI]]
+; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
+; CHECK-NEXT: store i64 [[HW64]], ptr [[HI_PTR]], align 8
+; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = shl i64 [[LOW_ACCUM]], 32
+; CHECK-NEXT: [[Y_LO_X_LO_LO:%.*]] = and i64 [[Y_LO_X_LO]], 4294967295
+; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[LOW_ACCUM_SHIFTED]], [[Y_LO_X_LO_LO]]
+; CHECK-NEXT: store i64 [[TMP4]], ptr [[P]], align 8
+; CHECK-NEXT: ret void
+;
+ %x_lo = and i64 %x, 4294967295
+ %y_lo = and i64 %y, 4294967295
+ %x_hi = lshr i64 %x, 32
+ %y_hi = lshr i64 %y, 32
+
+ %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi
+ %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi
+ %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo
+ %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo
+
+ %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi
+
+ %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi
+ %carry = select i1 %carry_out, i64 4294967296, i64 0
+
+ %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32
+
+ %cross_sum_lo = and i64 %cross_sum, 4294967295
+ %cross_sum_hi = lshr i64 %cross_sum, 32
+
+ %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi
+
+ %upper_mid = add nuw i64 %y_hi_x_hi, %carry
+ %low_accum_hi = lshr i64 %low_accum, 32
+ %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi
+ call void (...) @llvm.fake.use(i64 %upper_mid_with_cross)
+ %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi
+
+ %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8
+ store i64 %hw64, ptr %hi_ptr, align 8
+
+ %low_accum_shifted = shl i64 %low_accum, 32
+ %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295
+ %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo
+
+ store i64 %lw64, ptr %p, align 8
+
+ ret void
+}
+
+; 'low_accum_shifted' can have multiple uses.
+define void @full_mul_int128__mul_use__low_accum_shifted(i64 %x, i64 %y, ptr %p) { +; CHECK-LABEL: define void @full_mul_int128__mul_use__low_accum_shifted( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: [[HI_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8 +; CHECK-NEXT: store i64 [[TMP5]], ptr [[HI_PTR]], align 8 +; CHECK-NEXT: [[LW64:%.*]] = mul i64 [[X]], [[Y]] +; CHECK-NEXT: [[LOW_ACCUM_SHIFTED:%.*]] = and i64 [[LW64]], -4294967296 +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[LOW_ACCUM_SHIFTED]]) +; CHECK-NEXT: store i64 [[LW64]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %y_lo_x_hi = mul nuw i64 %y_lo, %x_hi + %y_hi_x_hi = mul nuw i64 %y_hi, %x_hi + %y_hi_x_lo = mul nuw i64 %y_hi, %x_lo + %y_lo_x_lo = mul nuw i64 %y_lo, %x_lo + + %cross_sum = add i64 %y_hi_x_lo, %y_lo_x_hi + + %carry_out = icmp ult i64 %cross_sum, %y_lo_x_hi + %carry = select i1 %carry_out, i64 4294967296, i64 0 + + %y_lo_x_lo_hi = lshr i64 %y_lo_x_lo, 32 + + %cross_sum_lo = and i64 %cross_sum, 4294967295 + %cross_sum_hi = lshr i64 %cross_sum, 32 + + %low_accum = add nuw nsw i64 %cross_sum_lo, %y_lo_x_lo_hi + + %upper_mid = add nuw i64 %y_hi_x_hi, %carry + %low_accum_hi = lshr i64 %low_accum, 32 + %upper_mid_with_cross = add i64 %upper_mid, %cross_sum_hi + %hw64 = add i64 %upper_mid_with_cross, %low_accum_hi + + %hi_ptr = getelementptr inbounds i8, ptr %p, i64 8 + store i64 %hw64, ptr %hi_ptr, align 8 + + %low_accum_shifted = shl i64 %low_accum, 32 + call void (...) 
@llvm.fake.use(i64 %low_accum_shifted) + %y_lo_x_lo_lo = and i64 %y_lo_x_lo, 4294967295 + %lw64 = or disjoint i64 %low_accum_shifted, %y_lo_x_lo_lo + + store i64 %lw64, ptr %p, align 8 + + ret void +} + diff --git a/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder.ll b/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder.ll new file mode 100644 index 0000000000000..257cc0315c72f --- /dev/null +++ b/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder.ll @@ -0,0 +1,858 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=aggressive-instcombine,instcombine -S | FileCheck %s + +; https://alive2.llvm.org/ce/z/MSo5S_ +define i64 @umulh_variant(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[TMP5:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[TMP5]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +define i32 @umulh_variant_i32(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @umulh_variant_i32( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] +; 
CHECK-NEXT: [[TMP4:%.*]] = lshr i64 [[TMP3]], 32 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i64 [[TMP4]] to i32 +; CHECK-NEXT: ret i32 [[HW64]] +; + %x_lo = and i32 %x, u0xffff + %y_lo = and i32 %y, u0xffff + %x_hi = lshr i32 %x, 16 + %y_hi = lshr i32 %y, 16 + + %t0 = mul nuw i32 %y_lo, %x_lo + %t1 = mul nuw i32 %y_lo, %x_hi + %t2 = mul nuw i32 %y_hi, %x_lo + %t3 = mul nuw i32 %y_hi, %x_hi + + %t0_hi = lshr i32 %t0, 16 + + %u0 = add nuw i32 %t0_hi, %t1 + %u0_lo = and i32 %u0, u0xffff + %u0_hi = lshr i32 %u0, 16 + %u1 = add nuw i32 %u0_lo, %t2 + %u1_hi = lshr i32 %u1, 16 + %u2 = add nuw i32 %u0_hi, %t3 + %hw64 = add nuw i32 %u2, %u1_hi + ret i32 %hw64 +} + +define <2 x i32> @umulh_variant_v2i32(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: define <2 x i32> @umulh_variant_v2i32( +; CHECK-SAME: <2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[Y]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[X]] to <2 x i64> +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw <2 x i64> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr <2 x i64> [[TMP3]], splat (i64 32) +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw <2 x i64> [[TMP4]] to <2 x i32> +; CHECK-NEXT: ret <2 x i32> [[HW64]] +; + %x_lo = and <2 x i32> %x, + %y_lo = and <2 x i32> %y, + %x_hi = lshr <2 x i32> %x, + %y_hi = lshr <2 x i32> %y, + + %t0 = mul nuw <2 x i32> %y_lo, %x_lo + %t1 = mul nuw <2 x i32> %y_lo, %x_hi + %t2 = mul nuw <2 x i32> %y_hi, %x_lo + %t3 = mul nuw <2 x i32> %y_hi, %x_hi + + %t0_hi = lshr <2 x i32> %t0, + + %u0 = add nuw <2 x i32> %t0_hi, %t1 + %u0_lo = and <2 x i32> %u0, + %u0_hi = lshr <2 x i32> %u0, + %u1 = add nuw <2 x i32> %u0_lo, %t2 + %u1_hi = lshr <2 x i32> %u1, + %u2 = add nuw <2 x i32> %u0_hi, %t3 + %hw64 = add nuw <2 x i32> %u2, %u1_hi + ret <2 x i32> %hw64 +} + +define i128 @umulh_variant_i128(i128 %x, i128 %y) { +; CHECK-LABEL: define i128 @umulh_variant_i128( +; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i128 [[Y]] 
to i256 +; CHECK-NEXT: [[TMP2:%.*]] = zext i128 [[X]] to i256 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i256 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i256 [[TMP3]], 128 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i256 [[TMP4]] to i128 +; CHECK-NEXT: ret i128 [[HW64]] +; + %x_lo = and i128 %x, u0xffffffffffffffff + %y_lo = and i128 %y, u0xffffffffffffffff + %x_hi = lshr i128 %x, 64 + %y_hi = lshr i128 %y, 64 + + %t0 = mul nuw i128 %y_lo, %x_lo + %t1 = mul nuw i128 %y_lo, %x_hi + %t2 = mul nuw i128 %y_hi, %x_lo + %t3 = mul nuw i128 %y_hi, %x_hi + + %t0_hi = lshr i128 %t0, 64 + + %u0 = add nuw i128 %t0_hi, %t1 + %u0_lo = and i128 %u0, u0xffffffffffffffff + %u0_hi = lshr i128 %u0, 64 + %u1 = add nuw i128 %u0_lo, %t2 + %u1_hi = lshr i128 %u1, 64 + %u2 = add nuw i128 %u0_hi, %t3 + %hw64 = add nuw i128 %u2, %u1_hi + ret i128 %hw64 +} + +define i64 @umulh_variant_commuted(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant_commuted( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %x_lo, %y_lo + %t1 = mul nuw i64 %x_lo, %y_hi + %t2 = mul nuw i64 %x_hi, %y_lo + %t3 = mul nuw i64 %x_hi, %y_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t1, %t0_hi + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %t2, %u0_lo + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u1_hi, %u0_hi + %hw64 = add nuw i64 %t3, %u2 + ret i64 %hw64 +} + + + +; Negative tests + +define i64 @umulh_variant_notlox(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant_notlox( +; CHECK-SAME: i64 [[X:%.*]], 
i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967294 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967294 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967294 ; wrong imm + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +define i64 @umulh_variant_nothiy(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant_nothiy( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 16 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 
[[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 16 ; wrong imm + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +define i64 @umulh_variant_notlowacc(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant_notlowacc( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967294 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: 
[[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967294 ; wrong imm + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +define i64 @umulh_variant_notll(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant_notll( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t3 = mul nuw i64 %y_lo, %x_lo ; swapped lolo and hihi + %t1 = mul nuw i64 
%y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t0 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + + + +; Use checks + +; 't0' can have more than one use. +define i64 @umulh_variant__mul_use__t0(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__t0( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[T0]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + call void (...) @llvm.fake.use(i64 %t0) + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 't1' can have more than one use. 
+define i64 @umulh_variant__mul_use__t1(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__t1( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i64 [[T1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + call void (...) @llvm.fake.use(i64 %t1) + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 't2' can have more than one use. +define i64 @umulh_variant__mul_use__t2(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__t2( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[T2]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + call void (...) @llvm.fake.use(i64 %t2) + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 't3' must have single use. +define i64 @umulh_variant__mul_use__t3(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__t3( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[T3]]) +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + call void (...) @llvm.fake.use(i64 %t3) + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 't0_hi' must have single use. +define i64 @umulh_variant__mul_use__t0_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__t0_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[T0_HI]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[Y]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = zext i64 [[X]] to i128 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i128 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = lshr i128 [[TMP3]], 64 +; CHECK-NEXT: [[HW64:%.*]] = trunc nuw i128 [[TMP4]] to i64 +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + call void (...) @llvm.fake.use(i64 %t0_hi) + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u0' must have single use. +define i64 @umulh_variant__mul_use__u0(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u0( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U0]]) +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + call void (...) @llvm.fake.use(i64 %u0) + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u0_lo' must have single use. +define i64 @umulh_variant__mul_use__u0_lo(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u0_lo( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U0_LO]]) +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + call void (...) @llvm.fake.use(i64 %u0_lo) + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u0_hi' must have single use. +define i64 @umulh_variant__mul_use__u0_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u0_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U0_HI]]) +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + call void (...) @llvm.fake.use(i64 %u0_hi) + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u1' must have single use. +define i64 @umulh_variant__mul_use__u1(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u1( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U1]]) +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + call void (...) @llvm.fake.use(i64 %u1) + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u1_hi' must have single use. +define i64 @umulh_variant__mul_use__u1_hi(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u1_hi( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[T0_HI:%.*]] = lshr i64 [[T0]], 32 +; CHECK-NEXT: [[U0:%.*]] = add nuw i64 [[T0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U0]], 4294967295 +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U1_HI]]) +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U0_HI]], [[T3]] +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + call void (...) @llvm.fake.use(i64 %u1_hi) + %u2 = add nuw i64 %u0_hi, %t3 + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +; 'u2' must have single use. +define i64 @umulh_variant__mul_use__u2(i64 %x, i64 %y) { +; CHECK-LABEL: define i64 @umulh_variant__mul_use__u2( +; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) { +; CHECK-NEXT: [[X_LO:%.*]] = and i64 [[X]], 4294967295 +; CHECK-NEXT: [[Y_LO:%.*]] = and i64 [[Y]], 4294967295 +; CHECK-NEXT: [[X_HI:%.*]] = lshr i64 [[X]], 32 +; CHECK-NEXT: [[Y_HI:%.*]] = lshr i64 [[Y]], 32 +; CHECK-NEXT: [[U0:%.*]] = mul nuw i64 [[Y_LO]], [[X_LO]] +; CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[Y_LO]], [[X_HI]] +; CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[Y_HI]], [[X_LO]] +; CHECK-NEXT: [[T3:%.*]] = mul nuw i64 [[Y_HI]], [[X_HI]] +; CHECK-NEXT: [[U0_HI:%.*]] = lshr i64 [[U0]], 32 +; CHECK-NEXT: [[U1:%.*]] = add nuw i64 [[U0_HI]], [[T1]] +; CHECK-NEXT: [[U0_LO:%.*]] = and i64 [[U1]], 4294967295 +; CHECK-NEXT: [[U1_HI:%.*]] = lshr i64 [[U1]], 32 +; CHECK-NEXT: [[U3:%.*]] = add nuw i64 [[U0_LO]], [[T2]] +; CHECK-NEXT: [[U1_HI1:%.*]] = lshr i64 [[U3]], 32 +; CHECK-NEXT: [[U2:%.*]] = add nuw i64 [[U1_HI]], [[T3]] +; CHECK-NEXT: call void (...) 
@llvm.fake.use(i64 [[U2]]) +; CHECK-NEXT: [[HW64:%.*]] = add nuw i64 [[U2]], [[U1_HI1]] +; CHECK-NEXT: ret i64 [[HW64]] +; + %x_lo = and i64 %x, 4294967295 + %y_lo = and i64 %y, 4294967295 + %x_hi = lshr i64 %x, 32 + %y_hi = lshr i64 %y, 32 + + %t0 = mul nuw i64 %y_lo, %x_lo + %t1 = mul nuw i64 %y_lo, %x_hi + %t2 = mul nuw i64 %y_hi, %x_lo + %t3 = mul nuw i64 %y_hi, %x_hi + + %t0_hi = lshr i64 %t0, 32 + + %u0 = add nuw i64 %t0_hi, %t1 + %u0_lo = and i64 %u0, 4294967295 + %u0_hi = lshr i64 %u0, 32 + %u1 = add nuw i64 %u0_lo, %t2 + %u1_hi = lshr i64 %u1, 32 + %u2 = add nuw i64 %u0_hi, %t3 + call void (...) @llvm.fake.use(i64 %u2) + %hw64 = add nuw i64 %u2, %u1_hi + ret i64 %hw64 +} + +define [2 x i64] @XXH_mult64to128(i64 noundef %lhs, i64 noundef %rhs) { +; CHECK-LABEL: define [2 x i64] @XXH_mult64to128( +; CHECK-SAME: i64 noundef [[LHS:%.*]], i64 noundef [[RHS:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i64 [[RHS]] to i128 +; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[LHS]] to i128 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i128 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i128 [[TMP2]], 64 +; CHECK-NEXT: [[ADD16:%.*]] = trunc nuw i128 [[TMP3]] to i64 +; CHECK-NEXT: [[SHR102:%.*]] = mul i64 [[LHS]], [[RHS]] +; CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [2 x i64] poison, i64 [[SHR102]], 0 +; CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT]], i64 [[ADD16]], 1 +; CHECK-NEXT: ret [2 x i64] [[DOTFCA_1_INSERT]] +; +entry: + %and = and i64 %lhs, 4294967295 + %and1 = and i64 %rhs, 4294967295 + %mul.i = mul nuw i64 %and1, %and + %shr = lshr i64 %lhs, 32 + %mul.i27 = mul nuw i64 %and1, %shr + %shr5 = lshr i64 %rhs, 32 + %mul.i28 = mul nuw i64 %shr5, %and + %mul.i29 = mul nuw i64 %shr5, %shr + %shr10 = lshr i64 %mul.i, 32 + %and11 = and i64 %mul.i27, 4294967295 + %add = add nuw i64 %and11, %mul.i28 + %add12 = add nuw i64 %add, %shr10 + %shr13 = lshr i64 %mul.i27, 32 + %shr14 = lshr i64 %add12, 32 + %add15 = add 
nuw i64 %shr13, %mul.i29 + %add16 = add nuw i64 %add15, %shr14 + %shl = shl i64 %add12, 32 + %and17 = and i64 %mul.i, 4294967295 + %or = or disjoint i64 %shl, %and17 + %.fca.0.insert = insertvalue [2 x i64] poison, i64 %or, 0 + %.fca.1.insert = insertvalue [2 x i64] %.fca.0.insert, i64 %add16, 1 + ret [2 x i64] %.fca.1.insert +} + diff --git a/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder4.ll b/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder4.ll new file mode 100644 index 0000000000000..307fc62a6b4ba --- /dev/null +++ b/llvm/test/Transforms/AggressiveInstCombine/umulh_ladder4.ll @@ -0,0 +1,530 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=aggressive-instcombine,instcombine -S | FileCheck %s + +; Ladder4 variant. https://alive2.llvm.org/ce/z/tExFRs +define i32 @mul_ladder4(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD19:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %xl, %yl + %mullh = mul nuw i32 %xl, %yh + %mulhl = mul nuw i32 %xh, %yl + %mulhh = mul nuw i32 %xh, %yh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %shr8, %conv10 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %add, %conv12 + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %add16 = add nuw i32 %mulhh, %shr15 + %shr17 = lshr i32 %mulhl, 16 + %add18 = add nuw i32 %add16, %shr17 + %add19 = add nuw i32 %add18, %shr14 + ret i32 %add19 +} + +define <2 x i32> 
@mul_ladder4_v2i32(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: define <2 x i32> @mul_ladder4_v2i32( +; CHECK-SAME: <2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext <2 x i32> [[X]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[Y]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw <2 x i64> [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr <2 x i64> [[TMP2]], splat (i64 32) +; CHECK-NEXT: [[ADD19:%.*]] = trunc nuw <2 x i64> [[TMP3]] to <2 x i32> +; CHECK-NEXT: ret <2 x i32> [[ADD19]] +; +entry: + %xl = and <2 x i32> %x, + %xh = lshr <2 x i32> %x, + %yl = and <2 x i32> %y, + %yh = lshr <2 x i32> %y, + %mulll = mul nuw <2 x i32> %xl, %yl + %mullh = mul nuw <2 x i32> %xl, %yh + %mulhl = mul nuw <2 x i32> %xh, %yl + %mulhh = mul nuw <2 x i32> %xh, %yh + %shr8 = lshr <2 x i32> %mulll, + %conv10 = and <2 x i32> %mullh, + %add = add nuw nsw <2 x i32> %shr8, %conv10 + %conv12 = and <2 x i32> %mulhl, + %add13 = add nuw nsw <2 x i32> %add, %conv12 + %shr14 = lshr <2 x i32> %add13, + %shr15 = lshr <2 x i32> %mullh, + %add16 = add nuw <2 x i32> %mulhh, %shr15 + %shr17 = lshr <2 x i32> %mulhl, + %add18 = add nuw <2 x i32> %add16, %shr17 + %add19 = add nuw <2 x i32> %add18, %shr14 + ret <2 x i32> %add19 +} + +define i128 @mul_ladder4_i128(i128 %x, i128 %y) { +; CHECK-LABEL: define i128 @mul_ladder4_i128( +; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i128 [[X]] to i256 +; CHECK-NEXT: [[TMP1:%.*]] = zext i128 [[Y]] to i256 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i256 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i256 [[TMP2]], 128 +; CHECK-NEXT: [[ADD19:%.*]] = trunc nuw i256 [[TMP3]] to i128 +; CHECK-NEXT: ret i128 [[ADD19]] +; +entry: + %xl = and i128 %x, u0xffffffffffffffff + %xh = lshr i128 %x, 64 + %yl = and i128 %y, u0xffffffffffffffff + %yh = lshr i128 %y, 64 + %mulll = mul nuw i128 %xl, %yl + %mullh = mul nuw i128 %xl, %yh + 
%mulhl = mul nuw i128 %xh, %yl + %mulhh = mul nuw i128 %xh, %yh + %shr8 = lshr i128 %mulll, 64 + %conv10 = and i128 %mullh, u0xffffffffffffffff + %add = add nuw nsw i128 %shr8, %conv10 + %conv12 = and i128 %mulhl, u0xffffffffffffffff + %add13 = add nuw nsw i128 %add, %conv12 + %shr14 = lshr i128 %add13, 64 + %shr15 = lshr i128 %mullh, 64 + %add16 = add nuw i128 %mulhh, %shr15 + %shr17 = lshr i128 %mulhl, 64 + %add18 = add nuw i128 %add16, %shr17 + %add19 = add nuw i128 %add18, %shr14 + ret i128 %add19 +} + +define i32 @mul_ladder4_commutted(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_commutted( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD19:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add nuw i32 %shr14, %shr17 + %add18 = add nuw i32 %add16, %shr15 + %add19 = add nuw i32 %mulhh, %add18 + ret i32 %add19 +} + +define i32 @mul_ladder4_swap_hl_lh(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_swap_hl_lh( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[X]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[Y]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] +; 
CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 32 +; CHECK-NEXT: [[ADD19:%.*]] = trunc nuw i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %xl, %yl + %mullh = mul nuw i32 %xl, %yh + %mulhl = mul nuw i32 %xh, %yl + %mulhh = mul nuw i32 %xh, %yh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mulhl, 65535 + %add = add nuw nsw i32 %shr8, %conv10 + %conv12 = and i32 %mullh, 65535 + %add13 = add nuw nsw i32 %add, %conv12 + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mulhl, 16 + %add16 = add nuw i32 %mulhh, %shr15 + %shr17 = lshr i32 %mullh, 16 + %add18 = add nuw i32 %add16, %shr17 + %add19 = add nuw i32 %add18, %shr14 + ret i32 %add19 +} + + +; Negative tests + +define i32 @mul_ladder4_notlhhl(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_notlhhl( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[XL]], [[YL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[XH]], [[YL]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[XH]], [[YH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV10:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHR8]], [[CONV10]] +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[ADD]], [[CONV12]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw i32 [[MULHH]], [[SHR15]] +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD18:%.*]] = add nuw i32 [[ADD16]], [[SHR17]] +; CHECK-NEXT: [[ADD19:%.*]] = add nuw i32 [[ADD18]], 
[[SHR14]] +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %xl, %yl + %mullh = mul nuw i32 %xl, %yh + %mulhl = mul nuw i32 %xh, %yl + %mulhh = mul nuw i32 %xh, %yh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mulhl, 65535 + %add = add nuw nsw i32 %shr8, %conv10 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %add, %conv12 + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mulhl, 16 + %add16 = add nuw i32 %mulhh, %shr15 + %shr17 = lshr i32 %mulhl, 16 + %add18 = add nuw i32 %add16, %shr17 + %add19 = add nuw i32 %add18, %shr14 + ret i32 %add19 +} + + + + + + +; Extra uses + +define i32 @mul_ladder4_use_add13(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_add13( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULLH:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV10:%.*]] = and i32 [[MULLH]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV10]], [[SHR8]] +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV12]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULLH]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw nsw i32 [[SHR14]], [[SHR17]] +; CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw i32 [[ADD16]], [[SHR15]] +; CHECK-NEXT: [[ADD19:%.*]] = add i32 [[MULHH]], [[ADD18]] +; CHECK-NEXT: call 
void (...) @llvm.fake.use(i32 [[ADD13]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %add13) + ret i32 %add19 +} + +define i32 @mul_ladder4_use_conv12(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_conv12( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL1:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV12]], [[SHR8]] +; CHECK-NEXT: [[CONV13:%.*]] = and i32 [[MULHL1]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV13]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL1]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw nsw i32 [[SHR14]], [[SHR17]] +; CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw i32 [[ADD16]], [[SHR15]] +; CHECK-NEXT: [[ADD19:%.*]] = add i32 
[[MULHH]], [[ADD18]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[CONV13]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %conv12) + ret i32 %add19 +} + +define i32 @mul_ladder4_use_u0(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_u0( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULHL1:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV13:%.*]] = and i32 [[MULHL1]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV13]], [[SHR8]] +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV12]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULHL1]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw nsw i32 [[SHR14]], [[SHR17]] +; CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw i32 [[ADD16]], [[SHR15]] +; 
CHECK-NEXT: [[ADD19:%.*]] = add i32 [[MULHH]], [[ADD18]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[ADD]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %add) + ret i32 %add19 +} + +define i32 @mul_ladder4_use_hl(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_hl( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULLH:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV10:%.*]] = and i32 [[MULLH]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV10]], [[SHR8]] +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV12]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULLH]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw nsw i32 [[SHR14]], [[SHR17]] +; CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw 
i32 [[ADD16]], [[SHR15]] +; CHECK-NEXT: [[ADD19:%.*]] = add i32 [[MULHH]], [[ADD18]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MULHL]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %mulhl) + ret i32 %add19 +} + +define i32 @mul_ladder4_use_lh(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_lh( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULLH:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV10:%.*]] = and i32 [[MULLH]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV10]], [[SHR8]] +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV12]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULLH]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[ADD16:%.*]] = add nuw nsw i32 [[SHR14]], [[SHR17]] +; 
CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw i32 [[ADD16]], [[SHR15]] +; CHECK-NEXT: [[ADD19:%.*]] = add i32 [[MULHH]], [[ADD18]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[MULLH]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %mullh) + ret i32 %add19 +} + +define i32 @mul_ladder4_use_conv10(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @mul_ladder4_use_conv10( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[XL:%.*]] = and i32 [[X]], 65535 +; CHECK-NEXT: [[XH:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[YL:%.*]] = and i32 [[Y]], 65535 +; CHECK-NEXT: [[YH:%.*]] = lshr i32 [[Y]], 16 +; CHECK-NEXT: [[MULLL:%.*]] = mul nuw i32 [[YL]], [[XL]] +; CHECK-NEXT: [[MULHL:%.*]] = mul nuw i32 [[YH]], [[XL]] +; CHECK-NEXT: [[MULHL1:%.*]] = mul nuw i32 [[YL]], [[XH]] +; CHECK-NEXT: [[MULHH:%.*]] = mul nuw i32 [[YH]], [[XH]] +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[MULLL]], 16 +; CHECK-NEXT: [[CONV12:%.*]] = and i32 [[MULHL]], 65535 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV12]], [[SHR8]] +; CHECK-NEXT: [[CONV13:%.*]] = and i32 [[MULHL1]], 65535 +; CHECK-NEXT: [[ADD13:%.*]] = add nuw nsw i32 [[CONV13]], [[ADD]] +; CHECK-NEXT: [[SHR14:%.*]] = lshr i32 [[ADD13]], 16 +; CHECK-NEXT: [[SHR15:%.*]] = lshr i32 [[MULHL]], 16 +; CHECK-NEXT: [[SHR17:%.*]] = lshr i32 [[MULHL1]], 16 +; CHECK-NEXT: [[ADD16:%.*]] 
= add nuw nsw i32 [[SHR14]], [[SHR17]] +; CHECK-NEXT: [[ADD18:%.*]] = add nuw nsw i32 [[ADD16]], [[SHR15]] +; CHECK-NEXT: [[ADD19:%.*]] = add i32 [[MULHH]], [[ADD18]] +; CHECK-NEXT: call void (...) @llvm.fake.use(i32 [[CONV12]]) +; CHECK-NEXT: ret i32 [[ADD19]] +; +entry: + %xl = and i32 %x, 65535 + %xh = lshr i32 %x, 16 + %yl = and i32 %y, 65535 + %yh = lshr i32 %y, 16 + %mulll = mul nuw i32 %yl, %xl + %mullh = mul nuw i32 %yh, %xl + %mulhl = mul nuw i32 %yl, %xh + %mulhh = mul nuw i32 %yh, %xh + %shr8 = lshr i32 %mulll, 16 + %conv10 = and i32 %mullh, 65535 + %add = add nuw nsw i32 %conv10, %shr8 + %conv12 = and i32 %mulhl, 65535 + %add13 = add nuw nsw i32 %conv12, %add + %shr14 = lshr i32 %add13, 16 + %shr15 = lshr i32 %mullh, 16 + %shr17 = lshr i32 %mulhl, 16 + %add16 = add i32 %shr14, %shr17 + %add18 = add i32 %add16, %shr15 + %add19 = add i32 %mulhh, %add18 + call void (...) @llvm.fake.use(i32 %conv10) + ret i32 %add19 +} diff --git a/llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll b/llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll index c4933678d0391..a01c032460632 100644 --- a/llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll +++ b/llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll @@ -553,4 +553,27 @@ exit: ret void } +define void @test_fp_recurrence_cmp_used_by_select() { +; CHECK-LABEL: @test_fp_recurrence_cmp_used_by_select( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %fp.iv = phi double [ 0.0, %entry ], [ %fp.iv.next, %loop ] + %fp.iv.next = fadd double %fp.iv, 1.250000e-02 + %cmp.fp = fcmp olt double %fp.iv.next, 2.001250e+00 + %cond = select i1 %cmp.fp, i1 false, i1 false + br i1 %cond, label %loop, label %exit + +exit: + ret void +} + declare void @opaque() diff --git a/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll 
b/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll new file mode 100644 index 0000000000000..59b84a3c082c2 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll @@ -0,0 +1,64 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=indvars -indvars-predicate-loops=1 -S | FileCheck %s + +; Loop with body using loop convergence token should be skipped by IndVarSimplify. + +declare token @llvm.experimental.convergence.entry() #0 + +define void @loop(i32 %tid, ptr %array) #0 { +; CHECK-LABEL: @loop( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = tail call token @llvm.experimental.convergence.entry() +; CHECK-NEXT: br label [[FOR_COND_I:%.*]] +; CHECK: for.cond.i: +; CHECK-NEXT: [[I_0_I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC_I:%.*]], [[FOR_BODY_I:%.*]] ] +; CHECK-NEXT: [[TMP1:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP0]]) ] +; CHECK-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[I_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_BODY_I]], label [[EXIT_LOOPEXIT:%.*]] +; CHECK: for.body.i: +; CHECK-NEXT: [[CMP1_I:%.*]] = icmp eq i32 [[I_0_I]], [[TID:%.*]] +; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I_0_I]], 1 +; CHECK-NEXT: br i1 [[CMP1_I]], label [[IF_THEN_I:%.*]], label [[FOR_COND_I]] +; CHECK: exit.loopexit: +; CHECK-NEXT: br label [[EXIT:%.*]] +; CHECK: if.then.i: +; CHECK-NEXT: [[HLSL_WAVE_ACTIVE_MAX2_I:%.*]] = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 [[TID]]) [ "convergencectrl"(token [[TMP1]]) ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i32 [[TID]] +; CHECK-NEXT: store i32 [[HLSL_WAVE_ACTIVE_MAX2_I]], ptr [[TMP2]], align 4 +; CHECK-NEXT: br label [[EXIT]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + %0 = tail call token @llvm.experimental.convergence.entry() + br label %for.cond.i + +for.cond.i: + %i.0.i = phi i32 [ 0, %entry ], [ %inc.i, %for.body.i ] + %2 = 
call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %0) ] + %cmp.i = icmp ult i32 %i.0.i, 8 + br i1 %cmp.i, label %for.body.i, label %exit.loopexit + +for.body.i: + %cmp1.i = icmp eq i32 %i.0.i, %tid + %inc.i = add nuw nsw i32 %i.0.i, 1 + br i1 %cmp1.i, label %if.then.i, label %for.cond.i + +exit.loopexit: + br label %exit + +if.then.i: + %hlsl.wave.active.max2.i = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 %tid) [ "convergencectrl"(token %2) ] + %3 = getelementptr inbounds i32, ptr %array, i32 %tid + store i32 %hlsl.wave.active.max2.i, ptr %3, align 4 + br label %exit + +exit: + ret void +} + +declare token @llvm.experimental.convergence.loop() #0 + +declare i32 @llvm.spv.wave.reduce.umax.i32(i32) #0 + +attributes #0 = { convergent } diff --git a/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll b/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll new file mode 100644 index 0000000000000..0944205839aca --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll @@ -0,0 +1,95 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=indvars -indvars-predicate-loops=1 -S | FileCheck %s + +; Nested loops with body using loop convergence token should be skipped by IndVarSimplify. 
+ +declare token @llvm.experimental.convergence.entry() #0 + +define void @nested(i32 %tidx, i32 %tidy, ptr %array) #0 { +; CHECK-LABEL: @nested( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = tail call token @llvm.experimental.convergence.entry() +; CHECK-NEXT: [[MUL_I:%.*]] = shl nsw i32 [[TIDX:%.*]], 3 +; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[MUL_I]], [[TIDY:%.*]] +; CHECK-NEXT: br label [[FOR_COND_I:%.*]] +; CHECK: for.cond.i: +; CHECK-NEXT: [[I_0_I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC10_I:%.*]], [[CLEANUP_I:%.*]] ] +; CHECK-NEXT: [[TMP1:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP0]]) ] +; CHECK-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[I_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_COND1_I_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK: for.cond1.i.preheader: +; CHECK-NEXT: [[CMP5_I:%.*]] = icmp eq i32 [[I_0_I]], [[TIDX]] +; CHECK-NEXT: br label [[FOR_COND1_I:%.*]] +; CHECK: for.cond1.i: +; CHECK-NEXT: [[J_0_I:%.*]] = phi i32 [ [[INC_I:%.*]], [[FOR_BODY4_I:%.*]] ], [ 0, [[FOR_COND1_I_PREHEADER]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP1]]) ] +; CHECK-NEXT: [[CMP2_I:%.*]] = icmp ult i32 [[J_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP2_I]], label [[FOR_BODY4_I]], label [[CLEANUP_I_LOOPEXIT:%.*]] +; CHECK: for.body4.i: +; CHECK-NEXT: [[CMP6_I:%.*]] = icmp eq i32 [[J_0_I]], [[TIDY]] +; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP5_I]], i1 [[CMP6_I]], i1 false +; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[J_0_I]], 1 +; CHECK-NEXT: br i1 [[OR_COND]], label [[IF_THEN_I:%.*]], label [[FOR_COND1_I]] +; CHECK: cleanup.i.loopexit: +; CHECK-NEXT: br label [[CLEANUP_I]] +; CHECK: if.then.i: +; CHECK-NEXT: [[HLSL_WAVE_ACTIVE_MAX7_I:%.*]] = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 [[ADD_I]]) [ "convergencectrl"(token [[TMP2]]) ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i32 [[ADD_I]] +; CHECK-NEXT: 
store i32 [[HLSL_WAVE_ACTIVE_MAX7_I]], ptr [[TMP3]], align 4 +; CHECK-NEXT: br label [[CLEANUP_I]] +; CHECK: cleanup.i: +; CHECK-NEXT: [[INC10_I]] = add nuw nsw i32 [[I_0_I]], 1 +; CHECK-NEXT: br label [[FOR_COND_I]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + %0 = tail call token @llvm.experimental.convergence.entry() + %mul.i = shl nsw i32 %tidx, 3 + %add.i = add nsw i32 %mul.i, %tidy + br label %for.cond.i + +for.cond.i: + %i.0.i = phi i32 [ 0, %entry ], [ %inc10.i, %cleanup.i ] + %2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %0) ] + %cmp.i = icmp ult i32 %i.0.i, 8 + br i1 %cmp.i, label %for.cond1.i.preheader, label %exit + +for.cond1.i.preheader: + %cmp5.i = icmp eq i32 %i.0.i, %tidx + br label %for.cond1.i + +for.cond1.i: + %j.0.i = phi i32 [ %inc.i, %for.body4.i ], [ 0, %for.cond1.i.preheader ] + %3 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %2) ] + %cmp2.i = icmp ult i32 %j.0.i, 8 + br i1 %cmp2.i, label %for.body4.i, label %cleanup.i.loopexit + +for.body4.i: + %cmp6.i = icmp eq i32 %j.0.i, %tidy + %or.cond = select i1 %cmp5.i, i1 %cmp6.i, i1 false + %inc.i = add nsw i32 %j.0.i, 1 + br i1 %or.cond, label %if.then.i, label %for.cond1.i + +cleanup.i.loopexit: + br label %cleanup.i + +if.then.i: + %hlsl.wave.active.max7.i = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 %add.i) [ "convergencectrl"(token %3) ] + %4 = getelementptr inbounds i32, ptr %array, i32 %add.i + store i32 %hlsl.wave.active.max7.i, ptr %4, align 4 + br label %cleanup.i + +cleanup.i: + %inc10.i = add nsw i32 %i.0.i, 1 + br label %for.cond.i + +exit: + ret void +} + +declare token @llvm.experimental.convergence.loop() #0 + +declare i32 @llvm.spv.wave.reduce.umax.i32(i32) #0 + +attributes #0 = { convergent } diff --git a/llvm/test/Transforms/InstCombine/get_vector_length.ll b/llvm/test/Transforms/InstCombine/get_vector_length.ll new file mode 100644 index 0000000000000..122beeae866f3 --- /dev/null +++ 
b/llvm/test/Transforms/InstCombine/get_vector_length.ll @@ -0,0 +1,89 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=instcombine,verify -S | FileCheck %s + +define i32 @cnt_known_lt() { +; CHECK-LABEL: define i32 @cnt_known_lt() { +; CHECK-NEXT: ret i32 1 +; + %x = call i32 @llvm.experimental.get.vector.length(i32 1, i32 2, i1 false) + ret i32 %x +} + +define i32 @cnt_not_known_lt() { +; CHECK-LABEL: define i32 @cnt_not_known_lt() { +; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 2, i32 1, i1 false) +; CHECK-NEXT: ret i32 [[X]] +; + %x = call i32 @llvm.experimental.get.vector.length(i32 2, i32 1, i1 false) + ret i32 %x +} + +define i32 @cnt_known_lt_scalable() vscale_range(2, 4) { +; CHECK-LABEL: define i32 @cnt_known_lt_scalable( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: ret i32 2 +; + %x = call i32 @llvm.experimental.get.vector.length(i32 2, i32 1, i1 true) + ret i32 %x +} + +define i32 @cnt_not_known_lt_scalable() { +; CHECK-LABEL: define i32 @cnt_not_known_lt_scalable() { +; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 2, i32 1, i1 true) +; CHECK-NEXT: ret i32 [[X]] +; + %x = call i32 @llvm.experimental.get.vector.length(i32 2, i32 1, i1 true) + ret i32 %x +} + +define i32 @cnt_known_lt_runtime(i32 %x) { +; CHECK-LABEL: define i32 @cnt_known_lt_runtime( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i32 [[X]], 4 +; CHECK-NEXT: call void @llvm.assume(i1 [[ICMP]]) +; CHECK-NEXT: ret i32 [[X]] +; + %icmp = icmp ule i32 %x, 3 + call void @llvm.assume(i1 %icmp) + %y = call i32 @llvm.experimental.get.vector.length(i32 %x, i32 3, i1 false) + ret i32 %y +} + +define i32 @cnt_known_lt_runtime_trunc(i64 %x) { +; CHECK-LABEL: define i32 @cnt_known_lt_runtime_trunc( +; CHECK-SAME: i64 [[X:%.*]]) { +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[X]], 4 +; CHECK-NEXT: call void @llvm.assume(i1 [[ICMP]]) 
+; CHECK-NEXT: [[Y:%.*]] = trunc nuw nsw i64 [[X]] to i32 +; CHECK-NEXT: ret i32 [[Y]] +; + %icmp = icmp ule i64 %x, 3 + call void @llvm.assume(i1 %icmp) + %y = call i32 @llvm.experimental.get.vector.length(i64 %x, i32 3, i1 false) + ret i32 %y +} + +; FIXME: We should be able to deduce the constant range from AssumptionCache +; rather than relying on KnownBits, which in this case only knows x <= 3. +define i32 @cnt_known_lt_runtime_assumption(i32 %x) { +; CHECK-LABEL: define i32 @cnt_known_lt_runtime_assumption( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i32 [[X]], 3 +; CHECK-NEXT: call void @llvm.assume(i1 [[ICMP]]) +; CHECK-NEXT: [[Y:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[X]], i32 2, i1 false) +; CHECK-NEXT: ret i32 [[Y]] +; + %icmp = icmp ule i32 %x, 2 + call void @llvm.assume(i1 %icmp) + %y = call i32 @llvm.experimental.get.vector.length(i32 %x, i32 2, i1 false) + ret i32 %y +} + + +define i32 @cnt_known_lt_i16() { +; CHECK-LABEL: define i32 @cnt_known_lt_i16() { +; CHECK-NEXT: ret i32 1 +; + %x = call i32 @llvm.experimental.get.vector.length(i16 1, i32 2, i1 false) + ret i32 %x +} diff --git a/llvm/test/Transforms/InstCombine/icmp-select.ll b/llvm/test/Transforms/InstCombine/icmp-select.ll index c6c0ba385a6fd..c29527a3c3c5e 100644 --- a/llvm/test/Transforms/InstCombine/icmp-select.ll +++ b/llvm/test/Transforms/InstCombine/icmp-select.ll @@ -835,3 +835,120 @@ define i1 @discr_eq_constantexpr(ptr %p) { %cmp = icmp eq i64 %sub, -1 ret i1 %cmp } + +define i1 @shl_nsw_eq_simplify_zero_to_self(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_nsw_eq_simplify_zero_to_self( +; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[COND:%.*]] to i8 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl nsw i8 %a, 3 + %sel = select i1 %cond, i8 8, i8 0 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_nsw_eq(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_nsw_eq( +; CHECK-NEXT: 
[[TMP1:%.*]] = select i1 [[COND:%.*]], i8 1, i8 -15 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl nsw i8 %a, 3 + %sel = select i1 %cond, i8 8, i8 -120 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_nuw_eq(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_nuw_eq( +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[COND:%.*]], i8 1, i8 17 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl nuw i8 %a, 3 + %sel = select i1 %cond, i8 8, i8 -120 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_nsw_failed_to_simplify(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_nsw_failed_to_simplify( +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[A:%.*]], 1 +; CHECK-NEXT: [[NOT_COND:%.*]] = xor i1 [[COND:%.*]], true +; CHECK-NEXT: [[CMP:%.*]] = select i1 [[NOT_COND]], i1 [[CMP1]], i1 false +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl nsw i8 %a, 4 + %sel = select i1 %cond, i8 8, i8 16 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_nuw_failed_to_simplify(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_nuw_failed_to_simplify( +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[A:%.*]], 4 +; CHECK-NEXT: [[NOT_COND:%.*]] = xor i1 [[COND:%.*]], true +; CHECK-NEXT: [[CMP:%.*]] = select i1 [[NOT_COND]], i1 [[CMP1]], i1 false +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl nuw i8 %a, 3 + %sel = select i1 %cond, i8 -1, i8 32 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_failed_to_simplify(i8 %a, i1 %cond) { +; CHECK-LABEL: @shl_failed_to_simplify( +; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[A:%.*]], 3 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], i8 8, i8 32 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[SHL]], [[SEL]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl = shl i8 %a, 3 + %sel = select i1 %cond, i8 8, i8 32 + %cmp = icmp eq i8 %shl, %sel + ret i1 %cmp +} + +define i1 @shl_nuw_ne(i8 %a, i8 %b, i8 %c, i1 %cond) { +; CHECK-LABEL: @shl_nuw_ne( +; CHECK-NEXT: 
[[TMP1:%.*]] = select i1 [[COND:%.*]], i8 [[B:%.*]], i8 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %shl_a = shl nuw i8 %a, 3 + %shl_b = shl nuw i8 %b, 3 + %sel = select i1 %cond, i8 %shl_b, i8 32 + %cmp = icmp ne i8 %sel, %shl_a + ret i1 %cmp +} + +define i1 @shl_const_phi_failed_to_simplify(i64 %indvars, i32 %conv) { +; CHECK-LABEL: @shl_const_phi_failed_to_simplify( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i64 [[INDVARS:%.*]], 1 +; CHECK-NEXT: br i1 [[CMP_SLT]], label [[END:%.*]], label [[THEN:%.*]] +; CHECK: then: +; CHECK-NEXT: br label [[END]] +; CHECK: end: +; CHECK-NEXT: [[CONST_PHI:%.*]] = phi i32 [ 0, [[THEN]] ], [ 65535, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[SHL_NUW:%.*]] = shl nuw i32 [[CONV:%.*]], 31 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_SLT]], i32 [[CONST_PHI]], i32 [[SHL_NUW]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SEL]], 0 +; CHECK-NEXT: ret i1 [[CMP]] +; +entry: + %cmp_slt = icmp slt i64 %indvars, 1 + br i1 %cmp_slt, label %end, label %then + +then: + br label %end + +end: + %const_phi = phi i32 [ 0, %then ], [ 65535, %entry ] + %shl_nuw = shl nuw i32 %conv, 31 + %sel = select i1 %cmp_slt, i32 %const_phi, i32 %shl_nuw + %cmp = icmp eq i32 %sel, 0 + ret i1 %cmp +} diff --git a/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll b/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll index 208e162ac9416..22c330fe7ae61 100644 --- a/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll @@ -160,6 +160,43 @@ define i64 @test_ptrauth_resign_ptrauth_constant(ptr %p) { ret i64 %authed } +@ds = external global i8 + +define i64 @test_ptrauth_nop_ds1(ptr %p) { +; CHECK-LABEL: @test_ptrauth_nop_ds1( +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 +; CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[TMP0]], i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] +; CHECK-NEXT: [[AUTHED:%.*]] = 
call i64 @llvm.ptrauth.auth(i64 [[SIGNED]], i32 1, i64 1234) +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %tmp0 = ptrtoint ptr %p to i64 + %signed = call i64 @llvm.ptrauth.sign(i64 %tmp0, i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] + %authed = call i64 @llvm.ptrauth.auth(i64 %signed, i32 1, i64 1234) + ret i64 %authed +} + +define i64 @test_ptrauth_nop_ds2(ptr %p) { +; CHECK-LABEL: @test_ptrauth_nop_ds2( +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 +; CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[TMP0]], i32 1, i64 1234) +; CHECK-NEXT: [[AUTHED:%.*]] = call i64 @llvm.ptrauth.auth(i64 [[SIGNED]], i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %tmp0 = ptrtoint ptr %p to i64 + %signed = call i64 @llvm.ptrauth.sign(i64 %tmp0, i32 1, i64 1234) + %authed = call i64 @llvm.ptrauth.auth(i64 %signed, i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +define i64 @test_ptrauth_nop_ds_constant() { +; CHECK-LABEL: @test_ptrauth_nop_ds_constant( +; CHECK-NEXT: [[AUTHED:%.*]] = call i64 @llvm.ptrauth.auth(i64 ptrtoint (ptr ptrauth (ptr @foo, i32 1, i64 1234, ptr null, ptr @ds) to i64), i32 1, i64 1234) +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %authed = call i64 @llvm.ptrauth.auth(i64 ptrtoint(ptr ptrauth(ptr @foo, i32 1, i64 1234, ptr null, ptr @ds) to i64), i32 1, i64 1234) + ret i64 %authed +} + declare i64 @llvm.ptrauth.auth(i64, i32, i64) declare i64 @llvm.ptrauth.sign(i64, i32, i64) declare i64 @llvm.ptrauth.resign(i64, i32, i64, i32, i64) diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll index 55b511fd51a2b..802795da47894 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll @@ -1,6 +1,33 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s +define void @onevec(ptr %ptr) { +; CHECK-LABEL: define void @onevec( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to <1 x i32> +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP3]] to <1 x i32> +; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 32 +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[GEP2]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[TMP5]] to <1 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32 [[TMP5]] to <1 x i32> +; CHECK-NEXT: ret void +; + %ld0 = load <1 x i32>, ptr %ptr, align 4 + %ld1 = load i32, ptr %ptr, align 4 + + %gep1 = getelementptr inbounds i8, ptr %ptr, i32 16 + %ld2 = load i32, ptr %gep1, align 4 + %ld3 = load <1 x i32>, ptr %gep1, align 4 + + %gep2 = getelementptr inbounds i8, ptr %ptr, i32 32 + %ld4 = load <1 x i32>, ptr %gep2, align 4 + %ld5 = load <1 x i32>, ptr %gep2, align 4 + ret void +} + define void @test(ptr %ptr) { ; CHECK-LABEL: define void @test( ; CHECK-SAME: ptr [[PTR:%.*]]) { diff --git a/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll b/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll index 220a4a29a3041..e94a368d3ded0 100644 --- a/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll +++ b/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll @@ -319,27 +319,33 @@ define float @test_fadd_with_ressaoc(ptr %src, i64 %n, float %start) { ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; 
CHECK-NEXT: [[RDX_1:%.*]] = phi float [ -0.000000e+00, %[[ENTRY]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_2:%.*]] = phi float [ -0.000000e+00, %[[ENTRY]] ], [ [[RDX_NEXT_2:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_3:%.*]] = phi float [ -0.000000e+00, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP_SRC]], align 1 -; CHECK-NEXT: [[RDX_NEXT:%.*]] = fadd float [[RDX]], [[L]] +; CHECK-NEXT: [[RDX_NEXT]] = fadd reassoc float [[RDX]], [[L]] ; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT]] ; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 1 -; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = fadd float [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[RDX_NEXT_1]] = fadd reassoc float [[RDX_1]], [[L_1]] ; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 ; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_1]] ; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 1 -; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = fadd float [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[RDX_NEXT_2]] = fadd reassoc float [[RDX_2]], [[L_2]] ; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 ; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_2]] ; CHECK-NEXT: [[L_24:%.*]] = load float, ptr [[GEP_SRC_24]], align 1 -; CHECK-NEXT: [[RDX_NEXT_3]] = fadd float [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[RDX_NEXT_3]] = fadd reassoc float [[RDX_3]], [[L_24]] ; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 ; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi float [ 
[[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_NEXT_LCSSA1:%.*]] = phi float [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc float [[RDX_NEXT_1]], [[RDX_NEXT]] +; CHECK-NEXT: [[BIN_RDX1:%.*]] = fadd reassoc float [[RDX_NEXT_2]], [[BIN_RDX]] +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = fadd reassoc float [[RDX_NEXT_3]], [[BIN_RDX1]] ; CHECK-NEXT: ret float [[RDX_NEXT_LCSSA]] ; entry: @@ -351,7 +357,7 @@ loop: %iv.next = add i64 %iv, 1 %gep.src = getelementptr float, ptr %src, i64 %iv %l = load float, ptr %gep.src, align 1 - %rdx.next = fadd float %rdx, %l + %rdx.next = fadd reassoc float %rdx, %l %ec = icmp ne i64 %iv.next, 1000 br i1 %ec, label %loop, label %exit diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll index fb1f2fcf5c190..840cc6c507c3d 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll @@ -287,6 +287,202 @@ exit: ret <4 x i32> %res } +define float @test_fadd_reduction(ptr %a, i64 %n) { +; CHECK-LABEL: define float @test_fadd_reduction( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_1:%.*]] = phi float [ -0.000000e+00, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY_NEW]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], 
[ [[NITER_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[GEP_A]], align 16 +; CHECK-NEXT: [[RDX_NEXT]] = fadd reassoc float [[RDX]], [[TMP2]] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[GEP_A_1]], align 16 +; CHECK-NEXT: [[RDX_NEXT_1]] = fadd reassoc float [[RDX_1]], [[TMP3]] +; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 +; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc float [[RDX_NEXT_1]], [[RDX_NEXT]] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[RDX_EPIL_INIT:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_EPIL_INIT]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[GEP_A_EPIL]], align 16 +; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = fadd reassoc 
float [[RDX_EPIL_INIT]], [[TMP4]] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi float [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: ret float [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi float [ 0.0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw float, ptr %a, i64 %iv + %1 = load float, ptr %gep.a, align 16 + %rdx.next = fadd reassoc float %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + %res = phi float [ %rdx.next, %loop ] + ret float %res +} + +define float @test_fadd_no_reassoc(ptr %a, i64 %n) { +; CHECK-LABEL: define float @test_fadd_no_reassoc( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[GEP_A]], align 16 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = fadd float [[RDX]], [[TMP2]] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, 
ptr [[GEP_A_1]], align 16 +; CHECK-NEXT: [[RDX_NEXT_1]] = fadd float [[RDX_NEXT]], [[TMP3]] +; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 +; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[RDX_EPIL_INIT:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[RDX_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_EPIL_INIT]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[GEP_A_EPIL]], align 16 +; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = fadd float [[RDX_EPIL_INIT]], [[TMP4]] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi float [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: ret float [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi float [ 0.0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw float, ptr %a, i64 %iv + %1 = load float, ptr %gep.a, align 16 + %rdx.next = fadd float %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 
1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + %res = phi float [ %rdx.next, %loop ] + ret float %res +} + +define float @test_fadd_other_fastmath(ptr %a, i64 %n) { +; CHECK-LABEL: define float @test_fadd_other_fastmath( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[GEP_A]], align 16 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = fadd contract float [[RDX]], [[TMP2]] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[GEP_A_1]], align 16 +; CHECK-NEXT: [[RDX_NEXT_1]] = fadd contract float [[RDX_NEXT]], [[TMP3]] +; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 +; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: 
[[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi float [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[RDX_EPIL_INIT:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[RDX_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_EPIL_INIT]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[GEP_A_EPIL]], align 16 +; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = fadd contract float [[RDX_EPIL_INIT]], [[TMP4]] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi float [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: ret float [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi float [ 0.0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw float, ptr %a, i64 %iv + %1 = load float, ptr %gep.a, align 16 + %rdx.next = fadd contract float %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + %res = phi float [ %rdx.next, %loop ] + ret float %res +} !0 = distinct !{!0, !1} !1 = !{!"llvm.loop.unroll.count", i32 2} @@ -301,4 +497,7 @@ exit: ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]} +; CHECK: [[LOOP7]] = distinct 
!{[[LOOP7]], [[META1]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll index 23918427e7003..95b4dcb23dd47 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll @@ -14,7 +14,7 @@ define void @fshl_operand_first_order_recurrence(ptr %dst, ptr noalias %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <2 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[VECTOR_RECUR]], <2 x i64> [[WIDE_LOAD]], <2 x i32> @@ -22,7 +22,7 @@ define void @fshl_operand_first_order_recurrence(ptr %dst, ptr noalias %src) { ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP6]], <2 x i64> splat (i64 1)) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP7]], <2 x i64> splat (i64 1)) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x i64> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP13]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index 9609982b2c68f..b549a06f08f8c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -63,7 +63,7 @@ define void @loop_dependent_cond(ptr %src, ptr noalias %dst, i64 %N) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ] ; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[TMP3]], i32 2 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[TMP3]], i64 2 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP3]], align 8 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP6]], align 8 ; DEFAULT-NEXT: [[TMP7:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[WIDE_LOAD]]) @@ -259,7 +259,7 @@ define void @latch_branch_cost(ptr %dst) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i32 16 +; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 16 ; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1 ; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -522,25 +522,47 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; DEFAULT-LABEL: define void @multiple_exit_conditions( ; DEFAULT-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR2:[0-9]+]] { ; DEFAULT-NEXT: [[ENTRY:.*:]] -; DEFAULT-NEXT: br label %[[VECTOR_PH:.*]] +; DEFAULT-NEXT: 
[[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 4 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 257, [[TMP3]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; DEFAULT: [[VECTOR_PH]]: -; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 2048 +; DEFAULT-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 +; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 257, [[TMP5]] +; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 257, [[N_MOD_VF]] +; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[N_VEC]], 8 +; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; DEFAULT-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2 ; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]] ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 -; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; DEFAULT-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 8 +; DEFAULT-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX1]] ; DEFAULT-NEXT: [[TMP1:%.*]] = load i16, ptr [[SRC]], align 2 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[TMP1]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP2:%.*]] = or <8 x i16> [[BROADCAST_SPLAT]], splat (i16 1) -; DEFAULT-NEXT: [[TMP3:%.*]] = uitofp <8 x i16> [[TMP2]] to <8 x double> -; DEFAULT-NEXT: store <8 x double> [[TMP3]], ptr [[NEXT_GEP]], align 8 -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; DEFAULT-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; DEFAULT-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; DEFAULT-NEXT: [[TMP8:%.*]] = or i16 [[TMP1]], 1 +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP8]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP9:%.*]] = uitofp [[BROADCAST_SPLAT]] to +; DEFAULT-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 2 +; DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP11]] +; DEFAULT-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP13]], 3 +; DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP14]] +; DEFAULT-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 12 +; DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP17]] +; DEFAULT-NEXT: store [[TMP9]], ptr [[NEXT_GEP1]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP12]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP15]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP18]], align 8 +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; DEFAULT-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; DEFAULT-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: -; DEFAULT-NEXT: br label %[[SCALAR_PH:.*]] +; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 257, [[N_VEC]] +; DEFAULT-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; DEFAULT: [[SCALAR_PH]]: ; ; PRED-LABEL: define void @multiple_exit_conditions( @@ -549,28 +571,28 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; PRED-NEXT: br label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP1:%.*]] = mul 
nuw i64 [[TMP0]], 2 +; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 1 +; PRED-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 2 ; PRED-NEXT: [[TMP8:%.*]] = sub i64 257, [[TMP7]] ; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 257, [[TMP7]] ; PRED-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0 -; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 257) +; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 257) ; PRED-NEXT: br label %[[VECTOR_BODY:.*]] ; PRED: [[VECTOR_BODY]]: ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; PRED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; PRED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] ; PRED-NEXT: [[TMP12:%.*]] = load i16, ptr [[SRC]], align 2 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP12]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[TMP13:%.*]] = or [[BROADCAST_SPLAT]], splat (i16 1) -; PRED-NEXT: [[TMP14:%.*]] = uitofp [[TMP13]] to -; PRED-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP14]], ptr align 8 [[NEXT_GEP]], [[ACTIVE_LANE_MASK]]) +; PRED-NEXT: [[TMP11:%.*]] = or i16 [[TMP12]], 1 +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP11]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[TMP13:%.*]] = uitofp 
[[BROADCAST_SPLAT]] to +; PRED-NEXT: call void @llvm.masked.store.nxv4f64.p0( [[TMP13]], ptr align 8 [[NEXT_GEP]], [[ACTIVE_LANE_MASK]]) ; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP10]]) -; PRED-NEXT: [[TMP15:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP10]]) +; PRED-NEXT: [[TMP15:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; PRED-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true ; PRED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: @@ -672,10 +694,10 @@ define void @test_conditional_interleave_group (ptr noalias %src.1, ptr noalias ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <8 x float> poison, float [[TMP15]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT8]], <8 x float> poison, <8 x i32> zeroinitializer ; DEFAULT-NEXT: [[TMP16:%.*]] = load float, ptr [[SRC_2]], align 4 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP16]], i64 0 +; DEFAULT-NEXT: [[TMP17:%.*]] = fmul float [[TMP16]], 0.000000e+00 +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP17]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP17:%.*]] = fmul <8 x float> [[BROADCAST_SPLAT]], zeroinitializer -; DEFAULT-NEXT: [[TMP18:%.*]] = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> [[BROADCAST_SPLAT9]], <8 x float> zeroinitializer, <8 x float> [[TMP17]]) +; DEFAULT-NEXT: [[TMP18:%.*]] = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> [[BROADCAST_SPLAT9]], <8 x float> zeroinitializer, <8 x float> 
[[BROADCAST_SPLAT]]) ; DEFAULT-NEXT: [[TMP19:%.*]] = load float, ptr [[SRC_3]], align 4 ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <8 x float> poison, float [[TMP19]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT10]], <8 x float> poison, <8 x i32> zeroinitializer @@ -857,10 +879,10 @@ define void @test_conditional_interleave_group (ptr noalias %src.1, ptr noalias ; PRED-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <8 x float> poison, float [[TMP18]], i64 0 ; PRED-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT8]], <8 x float> poison, <8 x i32> zeroinitializer ; PRED-NEXT: [[TMP19:%.*]] = load float, ptr [[SRC_2]], align 4 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP19]], i64 0 +; PRED-NEXT: [[TMP20:%.*]] = fmul float [[TMP19]], 0.000000e+00 +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP20]], i64 0 ; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer -; PRED-NEXT: [[TMP20:%.*]] = fmul <8 x float> [[BROADCAST_SPLAT]], zeroinitializer -; PRED-NEXT: [[TMP21:%.*]] = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> [[BROADCAST_SPLAT9]], <8 x float> zeroinitializer, <8 x float> [[TMP20]]) +; PRED-NEXT: [[TMP21:%.*]] = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> [[BROADCAST_SPLAT9]], <8 x float> zeroinitializer, <8 x float> [[BROADCAST_SPLAT]]) ; PRED-NEXT: [[TMP22:%.*]] = load float, ptr [[SRC_3]], align 4 ; PRED-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <8 x float> poison, float [[TMP22]], i64 0 ; PRED-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT10]], <8 x float> poison, <8 x i32> zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll index 0a433ec76acf4..f0664197dcb94 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll @@ -137,7 +137,7 @@ define void @test_shrink_zext_in_preheader(ptr noalias %src, ptr noalias %dst, i ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP4]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32 @@ -210,7 +210,7 @@ define void @test_shrink_select(ptr noalias %src, ptr noalias %dst, i32 %A, i1 % ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP6]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32 @@ -279,7 +279,7 @@ define void @trunc_invariant_sdiv_result(i32 %a, i32 %b, ptr noalias %src, ptr % ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = 
getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> @@ -287,7 +287,7 @@ define void @trunc_invariant_sdiv_result(i32 %a, i32 %b, ptr noalias %src, ptr % ; CHECK-NEXT: [[TMP5:%.*]] = mul <16 x i16> [[TMP0]], [[TMP3]] ; CHECK-NEXT: [[TMP6:%.*]] = mul <16 x i16> [[TMP0]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP7]], i32 16 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP7]], i64 16 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP7]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -413,7 +413,7 @@ define void @old_and_new_size_equalko(ptr noalias %src, ptr noalias %dst) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i32 4 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[WIDE_LOAD]] to <4 x i1> @@ -427,15 +427,16 @@ define void @old_and_new_size_equalko(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP10:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i32> ; CHECK-NEXT: [[TMP11:%.*]] = trunc <4 x i64> [[TMP9]] to <4 x i32> ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 4 +; 
CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 4 ; CHECK-NEXT: store <4 x i32> [[TMP10]], ptr [[TMP12]], align 4 ; CHECK-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP13]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br [[EXIT:label %.*]] -; CHECK: [[SCALAR_PH:.*:]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll index 2180f18750bf2..580c568c373f1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll @@ -27,7 +27,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <16 x i8> [[VEC_IND]], splat (i8 16) ; CHECK-NEXT: [[INDEX4:%.*]] = trunc i32 [[INDEX]] to i8 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i8 [[INDEX4]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 8 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <16 x i8> [[WIDE_LOAD]], splat (i8 3) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll index a3b7392dd280f..549df337e6907 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll @@ -19,17 +19,17 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 48 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 32 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 48 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 @@ -39,9 +39,9 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, 
ptr nocapture no ; CHECK-NEXT: [[TMP13:%.*]] = add <16 x i8> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 16 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 32 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 48 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 16 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 32 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 48 ; CHECK-NEXT: store <16 x i8> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP13]], ptr [[TMP18]], align 1 @@ -54,7 +54,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 8 @@ -71,7 +71,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: store <8 x i8> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 8 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], 
label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -89,7 +89,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -128,17 +128,17 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 24 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load 
<8 x i16>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 16 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 24 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 16 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP9]], align 1 @@ -148,22 +148,22 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[TMP13:%.*]] = add <8 x i16> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <8 x i16> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 8 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 16 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 24 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 8 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 16 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 24 ; CHECK-NEXT: store <8 x i16> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP13]], ptr [[TMP18]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP14]], ptr [[TMP19]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 4 @@ -180,7 +180,7 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store <4 x i16> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 4 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -198,7 +198,7 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp 
eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -237,17 +237,17 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP6]], align 1 ; CHECK-NEXT: 
[[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP9]], align 1 @@ -257,22 +257,22 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[TMP13:%.*]] = add <4 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <4 x i32> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 4 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 8 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 12 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 4 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 8 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 12 ; CHECK-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP18]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP19]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], 
label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 4 @@ -289,7 +289,7 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 4 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -307,7 +307,7 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -347,9 +347,9 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 16 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 32 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 48 +; 
CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 32 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[ARG]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 @@ -358,9 +358,9 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i8> [[WIDE_LOAD4]], splat (i8 10) ; CHECK-NEXT: [[TMP6:%.*]] = add <16 x i8> [[WIDE_LOAD5]], splat (i8 10) ; CHECK-NEXT: [[TMP7:%.*]] = add <16 x i8> [[WIDE_LOAD6]], splat (i8 10) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 48 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 48 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[ARG2]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP9]], align 1 @@ -369,7 +369,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK: middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF14:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label 
[[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -382,7 +382,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP13]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 -; CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -397,7 +397,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: store i8 [[SELECT]], ptr [[GEP_B]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 20 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -447,16 +447,16 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 12 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[NEXT_GEP]], align 4 
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP8]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP9]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP10]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -464,7 +464,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[N_VEC]], 4 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP12]] ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF1:%.*]] = urem i64 [[TMP2]], 4 @@ -479,7 +479,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[NEXT_GEP4]], align 4 ; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX3]], 4 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC2]] -; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label 
[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC2]] ; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -491,7 +491,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: store i32 0, ptr [[IV]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 4 ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[PTR_END]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll index 5e92123891b31..85726c161cc54 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll @@ -29,7 +29,7 @@ define void @test_widen_ptr_induction(ptr %ptr.start.1) { ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP12]]) ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]]) ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]]) -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 2 ; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[NEXT_GEP]], align 1 ; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP15]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -117,7 +117,7 @@ define void @test_widen_induction(ptr %A, i64 %N) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP1:%.*]] = 
getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i64> [[VEC_IND]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <2 x i64> [[STEP_ADD]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -201,7 +201,7 @@ define void @test_widen_induction_variable_start(ptr %A, i64 %N, i64 %start) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[START]], [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: store <2 x i64> [[VEC_IND]], ptr [[TMP2]], align 4 ; CHECK-NEXT: store <2 x i64> [[STEP_ADD]], ptr [[TMP4]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -285,7 +285,7 @@ define void @test_widen_induction_step_2(ptr %A, i64 %N, i32 %step) { ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 10) ; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 10) -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <2 x i64> [[TMP3]], ptr [[TMP5]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -399,7 +399,7 @@ define void @test_widen_truncated_induction(ptr %A) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i8> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i8> [[VEC_IND]], splat (i8 2) ; CHECK-NEXT: 
[[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i8> [[VEC_IND]], ptr [[TMP1]], align 1 ; CHECK-NEXT: store <2 x i8> [[STEP_ADD]], ptr [[TMP3]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll index cb4e99332c04b..4eacc55a99f72 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll @@ -20,7 +20,7 @@ define void @epilogue_vectorization_fix_scalar_resume_values(ptr %dst, i64 %n) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP0]], align 1 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll index 35d7e2cc8c586..feb0175e75542 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll @@ -21,16 +21,16 @@ define double @fp128_fmuladd_reduction(ptr %start0, ptr %start1, ptr %end0, ptr ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[START0]], 
i64 [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[START1]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP24:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 6 +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x fp128>, ptr [[TMP1]], align 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x fp128>, ptr [[TMP24]], align 16 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x fp128>, ptr [[TMP4]], align 16 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x fp128>, ptr [[TMP5]], align 16 -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP3]], i32 4 -; CHECK-NEXT: [[TMP36:%.*]] = getelementptr double, ptr [[TMP3]], i32 6 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP3]], i64 4 +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr double, ptr [[TMP3]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP3]], align 16 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP28]], align 16 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x double>, ptr [[TMP35]], align 16 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll index c94b3a4c49555..c692ba5b06690 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll @@ -26,7 +26,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: 
[[VECTOR_RECUR:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -34,7 +34,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP9:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[TMP7]] ; CHECK-NEXT: [[TMP10:%.*]] = add <16 x i8> [[WIDE_LOAD1]], [[TMP8]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP9]], ptr [[TMP11]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP14]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -119,7 +119,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR4:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT3]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5]] = load <16 x i8>, ptr [[TMP6]], 
align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -135,7 +135,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP17:%.*]] = add <16 x i8> [[TMP15]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP18:%.*]] = add <16 x i8> [[TMP16]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i32 16 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP17]], ptr [[TMP19]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP18]], ptr [[TMP22]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll index faee4c1194018..591bdabca65e7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll @@ -56,9 +56,9 @@ define void @fixed_wide_active_lane_mask(ptr noalias %dst, ptr noalias readonly ; CHECK-UF4-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0 ; CHECK-UF4-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-UF4-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX]] -; CHECK-UF4-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 -; CHECK-UF4-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 8 -; CHECK-UF4-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 12 +; CHECK-UF4-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 4 +; CHECK-UF4-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 8 +; CHECK-UF4-NEXT: 
[[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 12 ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP8]], <4 x i1> [[ACTIVE_LANE_MASK]]) ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP17]], <4 x i1> [[ACTIVE_LANE_MASK4]]) ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP18]], <4 x i1> [[ACTIVE_LANE_MASK5]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll index 6902dd990509e..a04367f32dd01 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll @@ -53,7 +53,7 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) @@ -128,10 +128,10 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr 
[[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll index 193424d3eb70a..0bddc498f9e83 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll @@ -53,7 +53,7 @@ define float @fminnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], 
align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll index f15f04fe5f6f2..3a9d5c34bacab 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll @@ -20,17 +20,17 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr 
inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -97,17 +97,17 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] 
= add nuw i64 [[INDEX]], 8 @@ -174,17 +174,17 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -251,17 +251,17 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: 
[[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -328,17 +328,17 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], 
i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x half>, ptr [[TMP6]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x half>, ptr [[TMP4]], align 2 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x half>, ptr [[TMP10]], align 2 ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[WIDE_LOAD]], <8 x half> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP13:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[WIDE_LOAD5]], <8 x half> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i64 8 ; CHECK-NEXT: store <8 x half> [[TMP11]], ptr [[TMP7]], align 2 ; CHECK-NEXT: store <8 x half> [[TMP13]], ptr [[TMP12]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -405,17 +405,17 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i64 8 ; CHECK-NEXT: 
[[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x half>, ptr [[TMP6]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x half>, ptr [[TMP4]], align 2 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x half>, ptr [[TMP10]], align 2 ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[WIDE_LOAD]], <8 x half> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP13:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[WIDE_LOAD5]], <8 x half> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i64 8 ; CHECK-NEXT: store <8 x half> [[TMP11]], ptr [[TMP7]], align 2 ; CHECK-NEXT: store <8 x half> [[TMP13]], ptr [[TMP12]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll index 56edee44fe3b1..21b21774d18cf 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll @@ -62,7 +62,7 @@ define void @test_iv_cost(ptr %ptr.start, i8 %a, i64 %b) { ; COST1: [[VECTOR_BODY]]: ; COST1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COST1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[INDEX]] -; COST1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 
+; COST1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 ; COST1-NEXT: store <16 x i8> zeroinitializer, ptr [[NEXT_GEP]], align 1 ; COST1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP0]], align 1 ; COST1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -328,7 +328,7 @@ define void @invalid_legacy_cost(i64 %N, ptr %x) #0 { ; COST1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP1]], i64 0 ; COST1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT]], <2 x ptr> poison, <2 x i32> zeroinitializer ; COST1-NEXT: [[TMP2:%.*]] = getelementptr ptr, ptr [[X]], i64 [[INDEX]] -; COST1-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[TMP2]], i32 2 +; COST1-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[TMP2]], i64 2 ; COST1-NEXT: store <2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; COST1-NEXT: store <2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; COST1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll index 1164778c19070..f645db16ed3c6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll @@ -50,7 +50,7 @@ define i64 @test_external_iv_user(ptr %a, ptr %b) #0 { ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK: Cost for VF 16: 57 -; CHECK: LV: Selecting VF: vscale x 2 +; CHECK: LV: Selecting VF: 16 entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll index 42a1940925968..7b42e565e127d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll @@ -25,7 +25,7 @@ define i32 @multi_exit_iv_uniform(i32 %a, i64 %N, ptr %dst) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP5]], align 8 ; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP9]], align 8 ; CHECK-NEXT: [[TMP10]] = add <4 x i32> [[VEC_PHI]], splat (i32 -1) @@ -106,7 +106,7 @@ define i64 @pointer_induction_only(ptr %start, ptr %end) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x i32>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <2 x i32> [[WIDE_LOAD4]] to <2 x i64> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -161,7 +161,7 @@ define i64 @int_and_pointer_iv(ptr %start, i32 %N) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; CHECK-NEXT: 
[[TMP5:%.*]] = zext <4 x i32> [[WIDE_LOAD3]] to <4 x i64> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -205,7 +205,7 @@ define void @wide_truncated_iv(ptr %dst) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <8 x i8> [ , [[VECTOR_PH1]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <8 x i8> [[VEC_IND]], splat (i8 8) ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 8 ; CHECK-NEXT: store <8 x i8> [[VEC_IND]], ptr [[TMP2]], align 1 ; CHECK-NEXT: store <8 x i8> [[STEP_ADD]], ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -287,7 +287,7 @@ define i64 @test_ptr_ivs_and_widened_ivs(ptr %src, i32 %N) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4 ; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i32> [[WIDE_LOAD]], splat (i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = zext <4 x i32> [[TMP7]] to <4 x i64> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll index 0c91661d20ae7..5b4bb70e6a479 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll @@ -3,9 +3,9 @@ target triple = "aarch64-linux-gnu" -; Original loop has trip count 16, but contains interleave groups with gaps, so +; Original loop has trip count 17, but contains interleave groups with gaps, so ; the last iteration must execute in the 
scalar loop. Thus the vector loop can -; only execute up to 15 iterations. +; only execute up to 16 iterations. define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i32 %x) #0 { ; CHECK-LABEL: define i64 @vector_loop_with_remaining_iterations( ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll index 9b4151f30d640..f7060ec3512ac 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll @@ -35,9 +35,9 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4: vector.body: ; INTERLEAVE-4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 -; INTERLEAVE-4-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 32 -; INTERLEAVE-4-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 48 +; INTERLEAVE-4-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 +; INTERLEAVE-4-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 32 +; INTERLEAVE-4-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 48 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 @@ -55,9 +55,9 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: [[TMP23:%.*]] = select <16 x i1> [[TMP15]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> 
[[TMP19]] ; INTERLEAVE-4-NEXT: [[TMP24:%.*]] = select <16 x i1> [[TMP16]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP20]] ; INTERLEAVE-4-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 16 -; INTERLEAVE-4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 32 -; INTERLEAVE-4-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 48 +; INTERLEAVE-4-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 16 +; INTERLEAVE-4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 32 +; INTERLEAVE-4-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 48 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP21]], ptr [[TMP25]], align 1 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP22]], ptr [[TMP30]], align 1 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP23]], ptr [[TMP31]], align 1 @@ -70,7 +70,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-4: vec.epilog.iter.check: ; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-4: vec.epilog.ph: ; INTERLEAVE-4-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; INTERLEAVE-4-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[N]], 8 @@ -91,7 +91,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: store <8 x i8> [[TMP39]], ptr [[TMP40]], align 1 ; INTERLEAVE-4-NEXT: [[INDEX_NEXT18]] = add nuw i64 [[INDEX12]], 8 ; 
INTERLEAVE-4-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT18]], [[N_VEC10]] -; INTERLEAVE-4-NEXT: br i1 [[TMP42]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; INTERLEAVE-4-NEXT: br i1 [[TMP42]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; INTERLEAVE-4: vec.epilog.middle.block: ; INTERLEAVE-4-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC10]] ; INTERLEAVE-4-NEXT: br i1 [[CMP_N11]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -109,7 +109,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: store i8 [[SEL]], ptr [[GEP_DST]], align 1 ; INTERLEAVE-4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; INTERLEAVE-4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; INTERLEAVE-4-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; INTERLEAVE-4-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; INTERLEAVE-4: exit: ; INTERLEAVE-4-NEXT: ret void ; @@ -137,7 +137,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2: vector.body: ; INTERLEAVE-2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; INTERLEAVE-2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; INTERLEAVE-2-NEXT: [[TMP7:%.*]] = icmp sgt <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -147,7 +147,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: [[TMP11:%.*]] = select <16 x i1> 
[[TMP7]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP9]] ; INTERLEAVE-2-NEXT: [[TMP12:%.*]] = select <16 x i1> [[TMP8]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP10]] ; INTERLEAVE-2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i32 16 +; INTERLEAVE-2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i64 16 ; INTERLEAVE-2-NEXT: store <16 x i8> [[TMP11]], ptr [[TMP13]], align 1 ; INTERLEAVE-2-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP16]], align 1 ; INTERLEAVE-2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -158,7 +158,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-2: vec.epilog.iter.check: ; INTERLEAVE-2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; INTERLEAVE-2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; INTERLEAVE-2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-2: vec.epilog.ph: ; INTERLEAVE-2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; INTERLEAVE-2-NEXT: [[N_MOD_VF7:%.*]] = urem i64 [[N]], 8 @@ -179,7 +179,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: store <8 x i8> [[TMP23]], ptr [[TMP24]], align 1 ; INTERLEAVE-2-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX10]], 8 ; INTERLEAVE-2-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC8]] -; INTERLEAVE-2-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; INTERLEAVE-2-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label 
[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; INTERLEAVE-2: vec.epilog.middle.block: ; INTERLEAVE-2-NEXT: [[CMP_N9:%.*]] = icmp eq i64 [[N]], [[N_VEC8]] ; INTERLEAVE-2-NEXT: br i1 [[CMP_N9]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll index aa94763b44a30..53cb0653fd241 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll @@ -29,9 +29,9 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4 -; INTERLEAVE-4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8 -; INTERLEAVE-4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 12 +; INTERLEAVE-4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4 +; INTERLEAVE-4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 8 +; INTERLEAVE-4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 12 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1 @@ -103,7 +103,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-2-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: 
[[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4 +; INTERLEAVE-2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 1 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; INTERLEAVE-2-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll index ee3a4a04566c9..3eb42845bec4a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll @@ -36,12 +36,12 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[OFFSET_IDX2]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD]], <8 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD4]], <8 x i16> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 8 ; CHECK-NEXT: store <8 x i16> [[TMP2]], ptr [[NEXT_GEP3]], align 2 ; 
CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -160,12 +160,12 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[WIDE_LOAD]], <16 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[WIDE_LOAD3]], <16 x i8> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[NEXT_GEP2]], align 2 ; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll b/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll index 0a9494e4c7ade..c43d62404006d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll @@ -22,7 +22,7 @@ define void @licm_replicate_call(double %x, ptr %dst) { ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> [[TMP3]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x double> [[TMP3]], [[TMP5]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds 
double, ptr [[TMP8]], i32 2 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP6]], ptr [[TMP8]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP7]], ptr [[TMP10]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index c768fec31a497..bdbf08aecf6b3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -103,7 +103,7 @@ define void @vectorize_without_optsize(ptr %p, i32 %x, i64 %n) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -621,17 +621,17 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 8 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [1000 x i32], 
ptr @C, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i32 8 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 ; DEFAULT-NEXT: [[TMP7:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] ; DEFAULT-NEXT: [[TMP8:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD3]] ; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i32 8 +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP9]], align 2 ; DEFAULT-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 ; DEFAULT-NEXT: [[TMP12:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i16> @@ -737,17 +737,17 @@ define void @vectorization_forced_minsize_reduce_width() { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 8 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i32 8 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; DEFAULT-NEXT: 
[[WIDE_LOAD3:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 ; DEFAULT-NEXT: [[TMP7:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] ; DEFAULT-NEXT: [[TMP8:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD3]] ; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i32 8 +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP9]], align 2 ; DEFAULT-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 ; DEFAULT-NEXT: [[TMP12:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i16> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index 287226f14b753..dd0107b8c4bff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -52,34 +52,38 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 
[[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP6]] -; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub <16 x i32> zeroinitializer, [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; 
CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP16]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP13]], [[TMP10]] +; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub zeroinitializer, [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -201,33 +205,37 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label 
[[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], 
<16 x i32> [[TMP10]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = mul nsw [[TMP12]], [[TMP14]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP15]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP12]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP11]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -349,34 +357,38 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 
[[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> 
[[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP16]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP13]], [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -502,35 +514,39 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: 
[[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x 
i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP10]] -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw [[TMP14]], [[TMP16]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP17]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP14]], [[TMP11]] +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub zeroinitializer, [[TMP12]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP13]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: 
[[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -658,35 +674,39 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], 
i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]]) -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP16]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP13]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP11]]) +; CHECK-SVE-NEXT: 
[[TMP12:%.*]] = mul nsw [[TMP15]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE3]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE4]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -818,37 +838,41 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] 
= phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]]) -; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP11]] -; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP10]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = 
load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP17:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw [[TMP15]], [[TMP17]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP18]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP15]], [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw [[TMP17]], [[TMP11]] +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sub zeroinitializer, [[TMP13]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE3]], [[TMP14]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]]) +; CHECK-SVE-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE4]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -978,32 +1002,36 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 
[[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw [[TMP11]], [[TMP13]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP14]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -1118,28 +1146,32 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: 
[[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> 
[[PARTIAL_REDUCE]], <16 x i32> [[TMP3]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]]) +; CHECK-SVE-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE2]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -1251,32 +1283,36 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; 
CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP11]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw [[TMP13]], [[TMP14]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 5b9bd0997f2fa..fed979e34e5d5 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -10,26 +10,30 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, 
[[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]] -; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP6]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP9]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; 
CHECK: middle.block: -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: for.exit: -; CHECK-NEXT: ret i32 [[TMP11]] +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK: scalar.ph: ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll index 0ee6b52a2450b..3142227815383 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll @@ -61,13 +61,13 @@ define i32 @sudot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16 ; CHECK-NOI8MM-NEXT: 
[[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP13]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -164,13 +164,13 @@ define i32 @usdot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP13]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -223,11 +223,11 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, 
ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -259,13 +259,13 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x 
i32> @@ -318,11 +318,11 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -354,13 +354,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: 
[[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll index c6c5c5105d540..b2be0e1d7a442 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll @@ -44,11 +44,11 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; 
CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -249,7 +249,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP34]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -513,13 +513,13 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr 
[[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> @@ -791,10 +791,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> @@ -805,10 +805,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP17]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP21]]) -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr 
[[TMP22]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP26]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> @@ -819,10 +819,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP25]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP29]]) -; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> @@ -833,10 +833,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP33]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP37]]) -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP42]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP46]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> @@ -1811,13 +1811,13 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; 
CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index ab593f6f8bb6b..2bea0733f65b0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -12,62 +12,74 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP16]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] 
= load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: 
define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP20]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = 
getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP8]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP7]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP28]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP28]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = 
call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul [[TMP13]], [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -162,16 +174,16 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; 
CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP18]], align 1 @@ -299,16 +311,16 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly % ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]] 
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 24 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 24 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP10]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 24 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 24 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <8 x i16>, ptr [[TMP18]], align 2 @@ -525,7 +537,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, 
ptr [[TMP32]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP34]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -789,13 +801,13 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP10]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP17]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP17]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP17]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> @@ -991,18 +1003,22 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVE1-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: -; 
CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP12]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: 
[[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1014,38 +1030,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVE1-NEXT: 
[[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nsw [[TMP15]], [[TMP16]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI3]], [[TMP17]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = sext [[WIDE_LOAD6]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul nsw [[TMP18]], [[TMP19]] +; 
CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP20]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext [[WIDE_LOAD8]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = sext [[WIDE_LOAD9]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = mul nsw [[TMP21]], [[TMP22]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP23]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = sext [[WIDE_LOAD11]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext [[WIDE_LOAD12]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw [[TMP24]], [[TMP25]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP26]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE7]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> 
[[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE13]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE10]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE7]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1053,22 +1069,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVED-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP0]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ 
zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1080,74 +1100,90 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP12]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP15]]) -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]]) -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP22]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP23]]) -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP25]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]]) -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1 -; 
CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP30]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP31]]) -; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP48]], [[TMP33]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]]) -; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP38]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP39]]) -; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul 
nsw <16 x i32> [[TMP40]], [[TMP41]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load , ptr [[TMP17]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = shl nuw i64 [[TMP18]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP19]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load , ptr [[TMP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext [[WIDE_LOAD9]] to +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nsw [[TMP21]], [[TMP22]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI6]], [[TMP23]]) +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext [[WIDE_LOAD10]] to +; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext [[WIDE_LOAD8]] to +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw [[TMP24]], [[TMP25]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI7]], [[TMP26]]) +; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = shl nuw i64 [[TMP27]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP28]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = 
load , ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load , ptr [[TMP29]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = shl nuw i64 [[TMP30]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 [[TMP31]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load , ptr [[TMP63]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext [[WIDE_LOAD12]] to +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext [[WIDE_LOAD14]] to +; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = mul nsw [[TMP33]], [[TMP34]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI4]], [[TMP35]]) +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext [[WIDE_LOAD13]] to +; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext [[WIDE_LOAD15]] to +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = mul nsw [[TMP36]], [[TMP37]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI5]], [[TMP38]]) +; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = shl nuw i64 [[TMP39]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP40]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load , ptr [[TMP41]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = shl nuw i64 [[TMP42]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[TMP43]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load , ptr [[TMP44]], align 1 +; CHECK-INTERLEAVED-NEXT: 
[[TMP45:%.*]] = sext [[WIDE_LOAD18]] to +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = sext [[WIDE_LOAD20]] to +; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = mul nsw [[TMP45]], [[TMP46]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP47]]) +; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext [[WIDE_LOAD19]] to +; CHECK-INTERLEAVED-NEXT: [[TMP49:%.*]] = sext [[WIDE_LOAD21]] to +; CHECK-INTERLEAVED-NEXT: [[TMP50:%.*]] = mul nsw [[TMP48]], [[TMP49]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI3]], [[TMP50]]) +; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = shl nuw i64 [[TMP51]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 [[TMP52]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load , ptr [[TMP53]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP55:%.*]] = shl nuw i64 [[TMP54]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP55]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load , ptr [[TMP56]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP57:%.*]] = sext [[WIDE_LOAD24]] to +; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = sext [[WIDE_LOAD26]] to +; CHECK-INTERLEAVED-NEXT: [[TMP59:%.*]] = mul nsw [[TMP57]], [[TMP58]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP59]]) +; CHECK-INTERLEAVED-NEXT: [[TMP60:%.*]] = sext [[WIDE_LOAD25]] to +; CHECK-INTERLEAVED-NEXT: [[TMP61:%.*]] = sext [[WIDE_LOAD27]] to +; CHECK-INTERLEAVED-NEXT: [[TMP62:%.*]] = mul nsw [[TMP60]], [[TMP61]] +; 
CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP62]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] -; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add <4 x i32> [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] -; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX30]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add <4 x i32> [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX31]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX32:%.*]] = add <4 x i32> [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX32]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] +; CHECK-INTERLEAVED-NEXT: [[TMP64:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] +; CHECK-INTERLEAVED-NEXT: [[TMP65:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX30]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] +; CHECK-INTERLEAVED-NEXT: [[TMP66:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX31]]) +; 
CHECK-INTERLEAVED-NEXT: [[BIN_RDX32:%.*]] = add [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP67:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX32]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1280,32 +1316,32 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, 
[[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sext [[WIDE_MASKED_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = mul nsw [[TMP10]], [[TMP13]] +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP15]], zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP19]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call 
@llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] @@ -1318,32 +1354,32 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ 
[[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sext [[WIDE_MASKED_LOAD1]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw [[TMP10]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP15]], zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP19]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: 
[[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] @@ -1416,66 +1452,82 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; 
CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD]] to ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP9]], [[TMP1]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP6]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul [[TMP5]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[TMP7]] = add [[TMP14]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: [[RESULT:%.*]] = add i32 [[TMP7]], [[TMP8]] -; CHECK-INTERLEAVE1-NEXT: ret i32 [[RESULT]] +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP7]]) +; CHECK-INTERLEAVE1-NEXT: 
[[TMP10:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 8 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = extractelement [[TMP5]], i32 [[TMP12]] +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_extend_user( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, 
ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP24]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP3]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP8]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = add <16 x i32> [[TMP15]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP11]] = add <16 x i32> [[TMP9]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP8]], 
i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul [[TMP12]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul [[TMP13]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP16]] = add [[TMP14]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[TMP17]] = add [[TMP15]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP11]], [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; CHECK-INTERLEAVED-NEXT: [[RESULT:%.*]] = add i32 [[TMP13]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP17]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sub i32 [[TMP21]], 1 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = extractelement [[TMP13]], i32 [[TMP22]] +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; 
CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_extend_user( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1538,62 +1590,83 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-LABEL: define i64 @dotp_cost_disagreement( ; CHECK-INTERLEAVE1-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: 
[[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw nsw <16 x i64> [[TMP3]], [[TMP4]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = mul nuw nsw [[TMP12]], [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP9]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @dotp_cost_disagreement( ; 
CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl 
nuw i64 [[TMP5]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP15]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP13]]) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP8]], [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP10]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i64 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: 
[[WIDE_LOAD3:%.*]] = load , ptr [[TMP15]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP12]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw [[TMP13]], [[TMP21]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP22]]) +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw nsw [[TMP16]], [[TMP17]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP18]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @dotp_cost_disagreement( @@ -1880,7 +1953,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i32 8 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i64 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> @@ -2009,7 +2082,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i32 8 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i64 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> @@ -2093,32 +2166,36 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVE1: for.body.preheader: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: 
[[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = insertelement zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP12]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> 
[[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = mul nuw nsw [[TMP14]], [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP11]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -2130,42 +2207,50 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVED: 
for.body.preheader: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = insertelement zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP21]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i64> [[TMP13]], [[TMP15]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP16]]) -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i64> [[TMP10]], [[TMP11]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP12]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP23]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] 
= load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP12]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[NEXT_GEP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw [[TMP15]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = zext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nuw nsw [[TMP18]], [[TMP19]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP20]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], 
[[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll index bd9fae6cd610b..80edfb5f0b6ff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll @@ -20,7 +20,7 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star ; IC2-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ] ; IC2-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], %[[VECTOR_BODY]] ] ; IC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; IC2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; IC2-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -73,9 +73,9 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star ; IC4-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; IC4-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32 -; IC4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 48 +; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, 
ptr [[TMP1]], i64 16 +; IC4-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 32 +; IC4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 48 ; IC4-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll index 672d19b1edeba..a439f5189794a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll @@ -16,13 +16,13 @@ define i32 @not_dotp(ptr %a, ptr %b) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP13:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP14:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll index 6dae09ef97e1c..66b8026d46704 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll @@ -12,65 +12,77 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]] -; 
CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = sub zeroinitializer, [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP11]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; 
CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 -; 
CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP15]]) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP20]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sub zeroinitializer, [[TMP12]] +; 
CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP13]]) +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul [[TMP21]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sub zeroinitializer, [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index 46ec858d7455c..3d2832eb366ad 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -14,20 
+14,25 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_add_reduc_i8_i32_sve( @@ -35,26 +40,33 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 -; CHECK-INTERLEAVED-NEXT: 
[[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: 
[[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @zext_add_reduc_i8_i32_sve( @@ -134,7 +146,7 @@ define i32 @zext_add_reduc_i8_i32_neon(ptr %a) #2 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -196,20 +208,25 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; 
CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; 
CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @zext_add_reduc_i8_i64( @@ -217,26 +234,33 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> 
[[VEC_PHI]], <16 x i64> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, 
[[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @zext_add_reduc_i8_i64( @@ -290,20 +314,25 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call 
@llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @zext_add_reduc_i16_i64( @@ -311,26 +340,33 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP1]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 2 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 2 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; 
CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @zext_add_reduc_i16_i64( @@ -413,9 +449,9 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] 
= load <16 x i8>, ptr [[TMP2]], align 1 @@ -486,21 +522,21 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add [[TMP8]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = select 
[[ACTIVE_LANE_MASK]], [[TMP3]], zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025) +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = xor i1 [[TMP11]], true ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: @@ -515,21 +551,21 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ 
[[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add [[TMP8]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP3]], zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP4]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025) +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = xor i1 [[TMP11]], true ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: @@ -674,20 +710,25 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; 
CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub <16 x i32> [[VEC_PHI]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub [[VEC_PHI]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: 
[[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod( @@ -695,38 +736,49 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, 
ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP1]], i32 48 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = sub <16 x i32> [[VEC_PHI]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = sub <16 x i32> [[VEC_PHI1]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = sub <16 x i32> [[VEC_PHI2]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP11]] = sub <16 x i32> [[VEC_PHI3]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP7]] +; 
CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 24 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD6]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16]] = sub [[VEC_PHI]], [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP17]] = sub [[VEC_PHI1]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP18]] = sub [[VEC_PHI2]], [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[TMP19]] = sub [[VEC_PHI3]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX7:%.*]] = add <16 x i32> [[TMP10]], [[BIN_RDX]] -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX8:%.*]] = add <16 x i32> [[TMP11]], [[BIN_RDX7]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX8]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP17]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX7:%.*]] = add [[TMP18]], [[BIN_RDX]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX8:%.*]] = add [[TMP19]], 
[[BIN_RDX7]] +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[BIN_RDX8]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod( @@ -780,20 +832,25 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; 
CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @sext_add_reduc_i8_i32( @@ -801,26 +858,33 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, 
[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call 
@llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @sext_add_reduc_i8_i32( @@ -874,28 +938,32 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: [[CONV1:%.*]] = zext i8 [[C]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP5]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP7]], 16 +; 
CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV1]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[CONV1]], i64 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: store zeroinitializer, ptr [[TMP3]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add [[VEC_PHI]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i32 
[[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32( [[TMP8]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -905,33 +973,39 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: [[CONV1:%.*]] = zext i8 [[C]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP5]], 5 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP13]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV1]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 
x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[CONV1]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP16]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 [[TMP9]] +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP3]], align 1 +; 
CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15]] = add [[VEC_PHI]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVED-NEXT: [[TMP12]] = add [[VEC_PHI2]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP21]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP12]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -995,29 +1069,33 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-SAME: i32 [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP5]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = mul nuw i32 
[[TMP3]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[C]], i64 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: store zeroinitializer, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: 
[[TMP8:%.*]] = zext [[BROADCAST_SPLAT]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP8]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1026,34 +1104,40 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-SAME: i32 [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP5]], 5 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP3]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], 
[[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[C]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP8]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP14]] +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[BROADCAST_SPLAT]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1156,9 +1240,9 @@ define i64 @sext_reduction_i32_to_i64(ptr %arr, i64 %n) #1 { ; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 4 -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 12 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 4 +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 12 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll index 5355a9772ef10..73dbefeb10413 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll @@ -15,7 +15,7 @@ define void @cost_hoisted_vector_code(ptr %p, float %arg) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[P]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP1]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = 
getelementptr float, ptr [[TMP1]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll index b2b0a1539b4f9..88e035ebf3be8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize -S -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -S -force-vector-interleave=2 -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck --check-prefix=IC2 %s target triple = "aarch64-unknown-linux-gnu" @@ -17,16 +18,16 @@ define void @test_invar_gep(ptr %dst) #0 { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.stepvector.nxv4i64() +; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = add zeroinitializer, [[TMP6]] -; CHECK-NEXT: [[TMP8:%.*]] = mul [[TMP7]], splat (i64 1) -; CHECK-NEXT: [[TMP9:%.*]] = add [[DOTSPLAT]], [[TMP8]] -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP10:%.*]] = add zeroinitializer, [[TMP5]] +; CHECK-NEXT: [[TMP4:%.*]] = mul [[TMP10]], 
splat (i64 1) +; CHECK-NEXT: [[TMP9:%.*]] = add [[DOTSPLAT]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 3 ; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i32 [[TMP15]], 4 ; CHECK-NEXT: [[TMP17:%.*]] = sub i32 [[TMP16]], 1 @@ -50,6 +51,65 @@ define void @test_invar_gep(ptr %dst) #0 { ; CHECK: exit: ; CHECK-NEXT: ret void ; +; IC2-LABEL: @test_invar_gep( +; IC2-NEXT: entry: +; IC2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3 +; IC2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP1]] +; IC2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IC2: vector.ph: +; IC2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP2]], 4 +; IC2-NEXT: [[TMP3:%.*]] = mul i64 [[TMP11]], 2 +; IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]] +; IC2-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; IC2-NEXT: br label [[VECTOR_BODY:%.*]] +; IC2: vector.body: +; IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = call @llvm.stepvector.nxv4i64() +; IC2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX]], i64 0 +; IC2-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; IC2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP11]], i64 0 +; IC2-NEXT: [[VEC_IND:%.*]] = shufflevector [[DOTSPLATINSERT1]], poison, zeroinitializer +; IC2-NEXT: [[TMP5:%.*]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; IC2-NEXT: [[TMP21:%.*]] = mul [[TMP5]], splat (i64 1) +; IC2-NEXT: [[TMP22:%.*]] = add [[DOTSPLAT]], [[TMP21]] +; IC2-NEXT: [[TMP23:%.*]] = add i64 [[TMP11]], 0 +; IC2-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 
1 +; IC2-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], [[TMP24]] +; IC2-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 1 +; IC2-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 1 +; IC2-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], [[TMP13]] +; IC2-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], 2 +; IC2-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 1 +; IC2-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], [[TMP16]] +; IC2-NEXT: [[TMP18:%.*]] = add i64 [[TMP11]], 3 +; IC2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 1 +; IC2-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], [[TMP19]] +; IC2-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() +; IC2-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 +; IC2-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 +; IC2-NEXT: [[TMP9:%.*]] = extractelement [[TMP22]], i32 [[TMP8]] +; IC2-NEXT: store i64 [[TMP9]], ptr [[DST:%.*]], align 1 +; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; IC2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IC2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IC2: middle.block: +; IC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; IC2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; IC2: scalar.ph: +; IC2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IC2-NEXT: br label [[LOOP:%.*]] +; IC2: loop: +; IC2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IC2-NEXT: [[GEP_INVAR:%.*]] = getelementptr i8, ptr [[DST]], i64 0 +; IC2-NEXT: store i64 [[IV]], ptr [[GEP_INVAR]], align 1 +; IC2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 +; IC2-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; IC2: exit: +; IC2-NEXT: ret void +; entry: br label %loop @@ -65,6 +125,272 @@ exit: ret void } +define void @test_invar_gep_var_start(i64 %start, ptr %dst) #0 { +; CHECK-LABEL: @test_invar_gep_var_start( 
+; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 100, [[START:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[START]], [[N_VEC]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.stepvector.nxv4i64() +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[OFFSET_IDX]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = add zeroinitializer, [[TMP6]] +; CHECK-NEXT: [[TMP15:%.*]] = mul [[TMP14]], splat (i64 1) +; CHECK-NEXT: [[TMP7:%.*]] = add [[DOTSPLAT]], [[TMP15]] +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 3 +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 +; CHECK-NEXT: [[TMP10:%.*]] = sub i32 [[TMP9]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = extractelement [[TMP7]], i32 [[TMP10]] +; CHECK-NEXT: store i64 [[TMP11]], ptr [[DST:%.*]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: 
br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ] +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[GEP_INVAR:%.*]] = getelementptr i8, ptr [[DST]], i64 0 +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_INVAR]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +; IC2-LABEL: @test_invar_gep_var_start( +; IC2-NEXT: entry: +; IC2-NEXT: [[TMP0:%.*]] = sub i64 100, [[START:%.*]] +; IC2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 +; IC2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; IC2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IC2: vector.ph: +; IC2-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; IC2-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 +; IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP5]] +; IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; IC2-NEXT: [[TMP6:%.*]] = add i64 [[START]], [[N_VEC]] +; IC2-NEXT: br label [[VECTOR_BODY:%.*]] +; IC2: vector.body: +; IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IC2-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[START]], [[INDEX]] +; IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = call @llvm.stepvector.nxv4i64() +; IC2-NEXT: [[DOTSPLATINSERT:%.*]] = 
insertelement poison, i64 [[OFFSET_IDX]], i64 0 +; IC2-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; IC2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP4]], i64 0 +; IC2-NEXT: [[VEC_IND:%.*]] = shufflevector [[DOTSPLATINSERT1]], poison, zeroinitializer +; IC2-NEXT: [[TMP11:%.*]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; IC2-NEXT: [[TMP9:%.*]] = mul [[TMP11]], splat (i64 1) +; IC2-NEXT: [[TMP10:%.*]] = add [[DOTSPLAT]], [[TMP9]] +; IC2-NEXT: [[TMP23:%.*]] = add i64 [[TMP4]], 0 +; IC2-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 1 +; IC2-NEXT: [[TMP25:%.*]] = add i64 [[OFFSET_IDX]], [[TMP24]] +; IC2-NEXT: [[TMP26:%.*]] = add i64 [[TMP4]], 1 +; IC2-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 1 +; IC2-NEXT: [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], [[TMP27]] +; IC2-NEXT: [[TMP17:%.*]] = add i64 [[TMP4]], 2 +; IC2-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 1 +; IC2-NEXT: [[TMP19:%.*]] = add i64 [[OFFSET_IDX]], [[TMP18]] +; IC2-NEXT: [[TMP20:%.*]] = add i64 [[TMP4]], 3 +; IC2-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 1 +; IC2-NEXT: [[TMP22:%.*]] = add i64 [[OFFSET_IDX]], [[TMP21]] +; IC2-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32() +; IC2-NEXT: [[TMP13:%.*]] = mul nuw i32 [[TMP12]], 4 +; IC2-NEXT: [[TMP14:%.*]] = sub i32 [[TMP13]], 1 +; IC2-NEXT: [[TMP15:%.*]] = extractelement [[TMP10]], i32 [[TMP14]] +; IC2-NEXT: store i64 [[TMP15]], ptr [[DST:%.*]], align 1 +; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; IC2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IC2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IC2: middle.block: +; IC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; IC2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; IC2: scalar.ph: +; IC2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ] +; IC2-NEXT: br label [[LOOP:%.*]] +; IC2: loop: +; 
IC2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IC2-NEXT: [[GEP_INVAR:%.*]] = getelementptr i8, ptr [[DST]], i64 0 +; IC2-NEXT: store i64 [[IV]], ptr [[GEP_INVAR]], align 1 +; IC2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 +; IC2-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; IC2: exit: +; IC2-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ] + %gep.invar = getelementptr i8, ptr %dst, i64 0 + store i64 %iv, ptr %gep.invar, align 1 + %iv.next = add nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 100 + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + ret void +} + +define void @test_invar_gep_var_start_step_2(i64 %start, ptr %dst) #0 { +; CHECK-LABEL: @test_invar_gep_var_start_step_2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 98, [[START:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = add nuw i64 [[TMP1]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[START]], [[TMP7]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[INDEX]], 2 +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 
[[START]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = call @llvm.stepvector.nxv4i64() +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[OFFSET_IDX]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = add zeroinitializer, [[TMP10]] +; CHECK-NEXT: [[TMP18:%.*]] = mul [[TMP11]], splat (i64 2) +; CHECK-NEXT: [[TMP12:%.*]] = add [[DOTSPLAT]], [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 2 +; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[OFFSET_IDX]], 4 +; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[OFFSET_IDX]], 6 +; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i32 [[TMP13]], 4 +; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 +; CHECK-NEXT: [[TMP16:%.*]] = extractelement [[TMP12]], i32 [[TMP15]] +; CHECK-NEXT: store i64 [[TMP16]], ptr [[DST:%.*]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ] +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[GEP_INVAR:%.*]] = getelementptr i8, ptr [[DST]], i64 0 +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_INVAR]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; 
CHECK: exit: +; CHECK-NEXT: ret void +; +; IC2-LABEL: @test_invar_gep_var_start_step_2( +; IC2-NEXT: entry: +; IC2-NEXT: [[TMP0:%.*]] = sub i64 98, [[START:%.*]] +; IC2-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1 +; IC2-NEXT: [[TMP2:%.*]] = add nuw i64 [[TMP1]], 1 +; IC2-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3 +; IC2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] +; IC2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IC2: vector.ph: +; IC2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; IC2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 +; IC2-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2 +; IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP7]] +; IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] +; IC2-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 2 +; IC2-NEXT: [[TMP9:%.*]] = add i64 [[START]], [[TMP8]] +; IC2-NEXT: br label [[VECTOR_BODY:%.*]] +; IC2: vector.body: +; IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IC2-NEXT: [[TMP10:%.*]] = mul i64 [[INDEX]], 2 +; IC2-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[START]], [[TMP10]] +; IC2-NEXT: [[TMP11:%.*]] = call @llvm.stepvector.nxv4i64() +; IC2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[OFFSET_IDX]], i64 0 +; IC2-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; IC2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP6]], i64 0 +; IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[DOTSPLATINSERT1]], poison, zeroinitializer +; IC2-NEXT: [[TMP16:%.*]] = add [[BROADCAST_SPLAT]], [[TMP11]] +; IC2-NEXT: [[TMP13:%.*]] = mul [[TMP16]], splat (i64 2) +; IC2-NEXT: [[TMP14:%.*]] = add [[DOTSPLAT]], [[TMP13]] +; IC2-NEXT: [[TMP15:%.*]] = add i64 [[TMP6]], 0 +; IC2-NEXT: [[TMP27:%.*]] = mul i64 [[TMP15]], 2 +; IC2-NEXT: [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], [[TMP27]] +; IC2-NEXT: [[TMP29:%.*]] = add 
i64 [[TMP6]], 1 +; IC2-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 2 +; IC2-NEXT: [[TMP31:%.*]] = add i64 [[OFFSET_IDX]], [[TMP30]] +; IC2-NEXT: [[TMP32:%.*]] = add i64 [[TMP6]], 2 +; IC2-NEXT: [[TMP22:%.*]] = mul i64 [[TMP32]], 2 +; IC2-NEXT: [[TMP23:%.*]] = add i64 [[OFFSET_IDX]], [[TMP22]] +; IC2-NEXT: [[TMP24:%.*]] = add i64 [[TMP6]], 3 +; IC2-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2 +; IC2-NEXT: [[TMP26:%.*]] = add i64 [[OFFSET_IDX]], [[TMP25]] +; IC2-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() +; IC2-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4 +; IC2-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 1 +; IC2-NEXT: [[TMP20:%.*]] = extractelement [[TMP14]], i32 [[TMP19]] +; IC2-NEXT: store i64 [[TMP20]], ptr [[DST:%.*]], align 1 +; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; IC2-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IC2-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IC2: middle.block: +; IC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] +; IC2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; IC2: scalar.ph: +; IC2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ] +; IC2-NEXT: br label [[LOOP:%.*]] +; IC2: loop: +; IC2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IC2-NEXT: [[GEP_INVAR:%.*]] = getelementptr i8, ptr [[DST]], i64 0 +; IC2-NEXT: store i64 [[IV]], ptr [[GEP_INVAR]], align 1 +; IC2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2 +; IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 +; IC2-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; IC2: exit: +; IC2-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ] + %gep.invar = getelementptr i8, ptr %dst, i64 0 + store i64 %iv, ptr %gep.invar, align 1 + %iv.next = add nsw i64 %iv, 2 + %ec = icmp eq 
i64 %iv.next, 100 + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + ret void +} + define void @test_loop2(i64 %n, ptr %dst) { ; CHECK-LABEL: @test_loop2( ; CHECK-NEXT: iter.check: @@ -130,11 +456,11 @@ define void @test_loop2(i64 %n, ptr %dst) { ; CHECK-NEXT: store i8 [[TMP51]], ptr [[TMP50]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP52:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992 -; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF10:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -171,7 +497,7 @@ define void @test_loop2(i64 %n, ptr %dst) { ; CHECK-NEXT: store i8 [[TMP80]], ptr [[TMP79]], align 1 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8 ; CHECK-NEXT: [[TMP81:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1000 -; CHECK-NEXT: br i1 [[TMP81]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP81]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -186,10 +512,187 @@ define void @test_loop2(i64 %n, ptr %dst) { ; CHECK-NEXT: store i8 [[SUB_N_TRUNC]], ptr [[GEP]], align 1 ; CHECK-NEXT: 
[[IV_NEXT]] = add nsw i64 [[IV]], 1 ; CHECK-NEXT: [[C:%.*]] = icmp sle i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; +; IC2-LABEL: @test_loop2( +; IC2-NEXT: iter.check: +; IC2-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; IC2: vector.main.loop.iter.check: +; IC2-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; IC2: vector.ph: +; IC2-NEXT: br label [[VECTOR_BODY:%.*]] +; IC2: vector.body: +; IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; IC2-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4 +; IC2-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5 +; IC2-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6 +; IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7 +; IC2-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8 +; IC2-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9 +; IC2-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10 +; IC2-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11 +; IC2-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12 +; IC2-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13 +; IC2-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14 +; IC2-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15 +; IC2-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16 +; IC2-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 17 +; IC2-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 18 +; IC2-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 19 +; IC2-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 20 +; IC2-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 21 +; IC2-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 22 +; IC2-NEXT: [[TMP23:%.*]] = add i64 [[INDEX]], 23 +; IC2-NEXT: [[TMP24:%.*]] = 
add i64 [[INDEX]], 24 +; IC2-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 25 +; IC2-NEXT: [[TMP26:%.*]] = add i64 [[INDEX]], 26 +; IC2-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 27 +; IC2-NEXT: [[TMP28:%.*]] = add i64 [[INDEX]], 28 +; IC2-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 29 +; IC2-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 +; IC2-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 +; IC2-NEXT: [[TMP32:%.*]] = sub nsw i64 [[N:%.*]], [[TMP0]] +; IC2-NEXT: [[TMP33:%.*]] = sub nsw i64 [[N]], [[TMP1]] +; IC2-NEXT: [[TMP34:%.*]] = sub nsw i64 [[N]], [[TMP2]] +; IC2-NEXT: [[TMP35:%.*]] = sub nsw i64 [[N]], [[TMP3]] +; IC2-NEXT: [[TMP36:%.*]] = sub nsw i64 [[N]], [[TMP4]] +; IC2-NEXT: [[TMP37:%.*]] = sub nsw i64 [[N]], [[TMP5]] +; IC2-NEXT: [[TMP38:%.*]] = sub nsw i64 [[N]], [[TMP6]] +; IC2-NEXT: [[TMP39:%.*]] = sub nsw i64 [[N]], [[TMP7]] +; IC2-NEXT: [[TMP40:%.*]] = sub nsw i64 [[N]], [[TMP8]] +; IC2-NEXT: [[TMP41:%.*]] = sub nsw i64 [[N]], [[TMP9]] +; IC2-NEXT: [[TMP42:%.*]] = sub nsw i64 [[N]], [[TMP10]] +; IC2-NEXT: [[TMP43:%.*]] = sub nsw i64 [[N]], [[TMP11]] +; IC2-NEXT: [[TMP44:%.*]] = sub nsw i64 [[N]], [[TMP12]] +; IC2-NEXT: [[TMP45:%.*]] = sub nsw i64 [[N]], [[TMP13]] +; IC2-NEXT: [[TMP46:%.*]] = sub nsw i64 [[N]], [[TMP14]] +; IC2-NEXT: [[TMP47:%.*]] = sub nsw i64 [[N]], [[TMP15]] +; IC2-NEXT: [[TMP48:%.*]] = insertelement <16 x i64> poison, i64 [[TMP32]], i32 0 +; IC2-NEXT: [[TMP49:%.*]] = insertelement <16 x i64> [[TMP48]], i64 [[TMP33]], i32 1 +; IC2-NEXT: [[TMP50:%.*]] = insertelement <16 x i64> [[TMP49]], i64 [[TMP34]], i32 2 +; IC2-NEXT: [[TMP51:%.*]] = insertelement <16 x i64> [[TMP50]], i64 [[TMP35]], i32 3 +; IC2-NEXT: [[TMP52:%.*]] = insertelement <16 x i64> [[TMP51]], i64 [[TMP36]], i32 4 +; IC2-NEXT: [[TMP53:%.*]] = insertelement <16 x i64> [[TMP52]], i64 [[TMP37]], i32 5 +; IC2-NEXT: [[TMP54:%.*]] = insertelement <16 x i64> [[TMP53]], i64 [[TMP38]], i32 6 +; IC2-NEXT: [[TMP55:%.*]] = insertelement <16 x i64> [[TMP54]], i64 [[TMP39]], i32 7 +; IC2-NEXT: 
[[TMP56:%.*]] = insertelement <16 x i64> [[TMP55]], i64 [[TMP40]], i32 8 +; IC2-NEXT: [[TMP57:%.*]] = insertelement <16 x i64> [[TMP56]], i64 [[TMP41]], i32 9 +; IC2-NEXT: [[TMP58:%.*]] = insertelement <16 x i64> [[TMP57]], i64 [[TMP42]], i32 10 +; IC2-NEXT: [[TMP59:%.*]] = insertelement <16 x i64> [[TMP58]], i64 [[TMP43]], i32 11 +; IC2-NEXT: [[TMP60:%.*]] = insertelement <16 x i64> [[TMP59]], i64 [[TMP44]], i32 12 +; IC2-NEXT: [[TMP61:%.*]] = insertelement <16 x i64> [[TMP60]], i64 [[TMP45]], i32 13 +; IC2-NEXT: [[TMP62:%.*]] = insertelement <16 x i64> [[TMP61]], i64 [[TMP46]], i32 14 +; IC2-NEXT: [[TMP63:%.*]] = insertelement <16 x i64> [[TMP62]], i64 [[TMP47]], i32 15 +; IC2-NEXT: [[TMP64:%.*]] = sub nsw i64 [[N]], [[TMP16]] +; IC2-NEXT: [[TMP65:%.*]] = sub nsw i64 [[N]], [[TMP17]] +; IC2-NEXT: [[TMP66:%.*]] = sub nsw i64 [[N]], [[TMP18]] +; IC2-NEXT: [[TMP67:%.*]] = sub nsw i64 [[N]], [[TMP19]] +; IC2-NEXT: [[TMP68:%.*]] = sub nsw i64 [[N]], [[TMP20]] +; IC2-NEXT: [[TMP69:%.*]] = sub nsw i64 [[N]], [[TMP21]] +; IC2-NEXT: [[TMP70:%.*]] = sub nsw i64 [[N]], [[TMP22]] +; IC2-NEXT: [[TMP71:%.*]] = sub nsw i64 [[N]], [[TMP23]] +; IC2-NEXT: [[TMP72:%.*]] = sub nsw i64 [[N]], [[TMP24]] +; IC2-NEXT: [[TMP73:%.*]] = sub nsw i64 [[N]], [[TMP25]] +; IC2-NEXT: [[TMP74:%.*]] = sub nsw i64 [[N]], [[TMP26]] +; IC2-NEXT: [[TMP75:%.*]] = sub nsw i64 [[N]], [[TMP27]] +; IC2-NEXT: [[TMP76:%.*]] = sub nsw i64 [[N]], [[TMP28]] +; IC2-NEXT: [[TMP77:%.*]] = sub nsw i64 [[N]], [[TMP29]] +; IC2-NEXT: [[TMP78:%.*]] = sub nsw i64 [[N]], [[TMP30]] +; IC2-NEXT: [[TMP79:%.*]] = sub nsw i64 [[N]], [[TMP31]] +; IC2-NEXT: [[TMP80:%.*]] = insertelement <16 x i64> poison, i64 [[TMP64]], i32 0 +; IC2-NEXT: [[TMP81:%.*]] = insertelement <16 x i64> [[TMP80]], i64 [[TMP65]], i32 1 +; IC2-NEXT: [[TMP82:%.*]] = insertelement <16 x i64> [[TMP81]], i64 [[TMP66]], i32 2 +; IC2-NEXT: [[TMP83:%.*]] = insertelement <16 x i64> [[TMP82]], i64 [[TMP67]], i32 3 +; IC2-NEXT: [[TMP84:%.*]] = insertelement <16 x 
i64> [[TMP83]], i64 [[TMP68]], i32 4 +; IC2-NEXT: [[TMP85:%.*]] = insertelement <16 x i64> [[TMP84]], i64 [[TMP69]], i32 5 +; IC2-NEXT: [[TMP86:%.*]] = insertelement <16 x i64> [[TMP85]], i64 [[TMP70]], i32 6 +; IC2-NEXT: [[TMP87:%.*]] = insertelement <16 x i64> [[TMP86]], i64 [[TMP71]], i32 7 +; IC2-NEXT: [[TMP88:%.*]] = insertelement <16 x i64> [[TMP87]], i64 [[TMP72]], i32 8 +; IC2-NEXT: [[TMP89:%.*]] = insertelement <16 x i64> [[TMP88]], i64 [[TMP73]], i32 9 +; IC2-NEXT: [[TMP90:%.*]] = insertelement <16 x i64> [[TMP89]], i64 [[TMP74]], i32 10 +; IC2-NEXT: [[TMP91:%.*]] = insertelement <16 x i64> [[TMP90]], i64 [[TMP75]], i32 11 +; IC2-NEXT: [[TMP92:%.*]] = insertelement <16 x i64> [[TMP91]], i64 [[TMP76]], i32 12 +; IC2-NEXT: [[TMP93:%.*]] = insertelement <16 x i64> [[TMP92]], i64 [[TMP77]], i32 13 +; IC2-NEXT: [[TMP94:%.*]] = insertelement <16 x i64> [[TMP93]], i64 [[TMP78]], i32 14 +; IC2-NEXT: [[TMP95:%.*]] = insertelement <16 x i64> [[TMP94]], i64 [[TMP79]], i32 15 +; IC2-NEXT: [[TMP96:%.*]] = trunc <16 x i64> [[TMP63]] to <16 x i8> +; IC2-NEXT: [[TMP97:%.*]] = trunc <16 x i64> [[TMP95]] to <16 x i8> +; IC2-NEXT: [[TMP98:%.*]] = add i64 [[TMP0]], [[TMP32]] +; IC2-NEXT: [[TMP99:%.*]] = add i64 [[TMP16]], [[TMP64]] +; IC2-NEXT: [[TMP100:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[TMP98]] +; IC2-NEXT: [[TMP101:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP99]] +; IC2-NEXT: [[TMP102:%.*]] = extractelement <16 x i8> [[TMP96]], i32 15 +; IC2-NEXT: [[TMP103:%.*]] = extractelement <16 x i8> [[TMP97]], i32 15 +; IC2-NEXT: store i8 [[TMP102]], ptr [[TMP100]], align 1 +; IC2-NEXT: store i8 [[TMP103]], ptr [[TMP101]], align 1 +; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; IC2-NEXT: [[TMP104:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992 +; IC2-NEXT: br i1 [[TMP104]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IC2: middle.block: +; IC2-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; IC2: 
vec.epilog.iter.check: +; IC2-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF10:![0-9]+]] +; IC2: vec.epilog.ph: +; IC2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; IC2-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; IC2: vec.epilog.vector.body: +; IC2-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; IC2-NEXT: [[TMP105:%.*]] = add i64 [[INDEX1]], 0 +; IC2-NEXT: [[TMP106:%.*]] = add i64 [[INDEX1]], 1 +; IC2-NEXT: [[TMP107:%.*]] = add i64 [[INDEX1]], 2 +; IC2-NEXT: [[TMP108:%.*]] = add i64 [[INDEX1]], 3 +; IC2-NEXT: [[TMP109:%.*]] = add i64 [[INDEX1]], 4 +; IC2-NEXT: [[TMP110:%.*]] = add i64 [[INDEX1]], 5 +; IC2-NEXT: [[TMP111:%.*]] = add i64 [[INDEX1]], 6 +; IC2-NEXT: [[TMP112:%.*]] = add i64 [[INDEX1]], 7 +; IC2-NEXT: [[TMP113:%.*]] = sub nsw i64 [[N]], [[TMP105]] +; IC2-NEXT: [[TMP114:%.*]] = sub nsw i64 [[N]], [[TMP106]] +; IC2-NEXT: [[TMP115:%.*]] = sub nsw i64 [[N]], [[TMP107]] +; IC2-NEXT: [[TMP116:%.*]] = sub nsw i64 [[N]], [[TMP108]] +; IC2-NEXT: [[TMP117:%.*]] = sub nsw i64 [[N]], [[TMP109]] +; IC2-NEXT: [[TMP118:%.*]] = sub nsw i64 [[N]], [[TMP110]] +; IC2-NEXT: [[TMP119:%.*]] = sub nsw i64 [[N]], [[TMP111]] +; IC2-NEXT: [[TMP120:%.*]] = sub nsw i64 [[N]], [[TMP112]] +; IC2-NEXT: [[TMP121:%.*]] = insertelement <8 x i64> poison, i64 [[TMP113]], i32 0 +; IC2-NEXT: [[TMP122:%.*]] = insertelement <8 x i64> [[TMP121]], i64 [[TMP114]], i32 1 +; IC2-NEXT: [[TMP123:%.*]] = insertelement <8 x i64> [[TMP122]], i64 [[TMP115]], i32 2 +; IC2-NEXT: [[TMP124:%.*]] = insertelement <8 x i64> [[TMP123]], i64 [[TMP116]], i32 3 +; IC2-NEXT: [[TMP125:%.*]] = insertelement <8 x i64> [[TMP124]], i64 [[TMP117]], i32 4 +; IC2-NEXT: [[TMP126:%.*]] = insertelement <8 x i64> [[TMP125]], i64 [[TMP118]], i32 5 +; IC2-NEXT: [[TMP127:%.*]] = insertelement <8 x i64> [[TMP126]], i64 
[[TMP119]], i32 6 +; IC2-NEXT: [[TMP128:%.*]] = insertelement <8 x i64> [[TMP127]], i64 [[TMP120]], i32 7 +; IC2-NEXT: [[TMP129:%.*]] = trunc <8 x i64> [[TMP128]] to <8 x i8> +; IC2-NEXT: [[TMP130:%.*]] = add i64 [[TMP105]], [[TMP113]] +; IC2-NEXT: [[TMP131:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP130]] +; IC2-NEXT: [[TMP132:%.*]] = extractelement <8 x i8> [[TMP129]], i32 7 +; IC2-NEXT: store i8 [[TMP132]], ptr [[TMP131]], align 1 +; IC2-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 +; IC2-NEXT: [[TMP133:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1000 +; IC2-NEXT: br i1 [[TMP133]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IC2: vec.epilog.middle.block: +; IC2-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; IC2: vec.epilog.scalar.ph: +; IC2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; IC2-NEXT: br label [[LOOP:%.*]] +; IC2: loop: +; IC2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IC2-NEXT: [[SUB_N:%.*]] = sub nsw i64 [[N]], [[IV]] +; IC2-NEXT: [[SUB_N_TRUNC:%.*]] = trunc i64 [[SUB_N]] to i8 +; IC2-NEXT: [[ADD:%.*]] = add i64 [[IV]], [[SUB_N]] +; IC2-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[ADD]] +; IC2-NEXT: store i8 [[SUB_N_TRUNC]], ptr [[GEP]], align 1 +; IC2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; IC2-NEXT: [[C:%.*]] = icmp sle i64 [[IV_NEXT]], 1000 +; IC2-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP12:![0-9]+]] +; IC2: exit: +; IC2-NEXT: ret void +; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 44ae1757ce6e6..f2c0ca30a6c18 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -59,8 +59,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[Z]], i64 0 -; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[X]], i64 0 ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; VSCALEFORTUNING2-NEXT: [[TMP7:%.*]] = add i64 [[Y]], 1 @@ -68,7 +66,9 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; VSCALEFORTUNING2-NEXT: [[TMP9:%.*]] = lshr [[BROADCAST_SPLAT]], splat (i32 1) ; VSCALEFORTUNING2-NEXT: [[TMP10:%.*]] = shl [[BROADCAST_SPLAT]], splat (i32 1) ; VSCALEFORTUNING2-NEXT: [[TMP11:%.*]] = or [[TMP9]], [[TMP10]] -; VSCALEFORTUNING2-NEXT: [[TMP12:%.*]] = or [[BROADCAST_SPLAT2]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP16:%.*]] = or i32 [[Z]], [[X]] +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP16]], i64 0 +; VSCALEFORTUNING2-NEXT: [[TMP12:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; VSCALEFORTUNING2-NEXT: [[TMP13:%.*]] = and [[TMP12]], splat (i32 1) ; VSCALEFORTUNING2-NEXT: [[TMP14:%.*]] = xor [[TMP13]], splat (i32 1) ; VSCALEFORTUNING2-NEXT: [[TMP15:%.*]] = zext [[TMP14]] to @@ -180,8 +180,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; PRED-NEXT: 
[[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[Z]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[X]], i64 0 ; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() @@ -195,7 +193,9 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED-NEXT: [[TMP13:%.*]] = lshr [[BROADCAST_SPLAT]], splat (i32 1) ; PRED-NEXT: [[TMP14:%.*]] = shl [[BROADCAST_SPLAT]], splat (i32 1) ; PRED-NEXT: [[TMP15:%.*]] = or [[TMP13]], [[TMP14]] -; PRED-NEXT: [[TMP16:%.*]] = or [[BROADCAST_SPLAT2]], [[BROADCAST_SPLAT]] +; PRED-NEXT: [[TMP20:%.*]] = or i32 [[Z]], [[X]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i32 [[TMP20]], i64 0 +; PRED-NEXT: [[TMP16:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer ; PRED-NEXT: [[TMP17:%.*]] = and [[TMP16]], splat (i32 1) ; PRED-NEXT: [[TMP18:%.*]] = xor [[TMP17]], splat (i32 1) ; PRED-NEXT: [[TMP19:%.*]] = zext [[TMP18]] to diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll index 68cfc659e1e94..fceab6f823d5a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll @@ -229,9 +229,9 @@ define void @test_load_gep_widen_induction(ptr noalias %dst, ptr noalias %dst2) ; CHECK-NEXT: store ptr null, ptr [[TMP11]], align 8 ; CHECK-NEXT: store ptr null, ptr [[TMP17]], align 8 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr ptr, ptr [[DST2]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 2 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 4 -; 
CHECK-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 6 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 2 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 4 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 6 ; CHECK-NEXT: store <2 x ptr> [[TMP0]], ptr [[TMP12]], align 8 ; CHECK-NEXT: store <2 x ptr> [[TMP1]], ptr [[TMP13]], align 8 ; CHECK-NEXT: store <2 x ptr> [[TMP2]], ptr [[TMP14]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll index dcb2b9b08d1e9..1d215118449aa 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll @@ -1,19 +1,19 @@ ; REQUIRES: asserts ; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -scalable-vectorization=off < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_DISABLED ; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON -; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -vectorizer-maximize-bandwidth -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON_MAXBW +; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -vectorizer-maximize-bandwidth=false -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON_NOMAXBW ; Test that the MaxVF for the following loop, that 
has no dependence distances, ; is calculated as vscale x 4 (max legal SVE vector size) or vscale x 16 ; (maximized bandwidth for i8 in the loop). define void @test0(ptr %a, ptr %b, ptr %c) #0 { ; CHECK: LV: Checking a loop in 'test0' -; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 4 -; CHECK_SCALABLE_ON: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 16 +; CHECK_SCALABLE_ON: LV: Selecting VF: vscale x 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: vscale x 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 4 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: vscale x 4 entry: br label %loop @@ -43,8 +43,8 @@ define void @test1(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 4 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 4 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: vscale x 4 entry: br label %loop @@ -75,8 +75,8 @@ define void @test2(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 2 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 2 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop @@ -107,8 +107,8 @@ define void @test3(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; 
CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 1 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 1 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop @@ -140,8 +140,8 @@ define void @test4(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 4 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 4 -; CHECK_SCALABLE_ON_MAXBW-NOT: LV: Found feasible scalable VF -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 4 +; CHECK_SCALABLE_ON_NOMAXBW-NOT: LV: Found feasible scalable VF +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/select-index.ll b/llvm/test/Transforms/LoopVectorize/AArch64/select-index.ll index cc7b9e26ca256..56d34a61be1db 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/select-index.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/select-index.ll @@ -9,7 +9,7 @@ define i64 @test_vectorize_select_umin_first_idx(ptr %src, i64 %n) { ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 100, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP]], align 8 ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[MIN_VAL]], [[L]] @@ -28,15 +28,15 @@ entry: loop: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] %min.idx = phi i64 [ 0, %entry ], [ %min.idx.next, %loop ] - %min.val = phi i64 [ 0, %entry ], [ %min.val.next, %loop ] + %min.val = phi i64 [ 100, %entry ], [ %min.val.next, 
%loop ] %gep = getelementptr i64, ptr %src, i64 %iv %l = load i64, ptr %gep %cmp = icmp ugt i64 %min.val, %l %min.val.next = tail call i64 @llvm.umin.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -47,21 +47,68 @@ define i64 @test_vectorize_select_umin_last_idx(ptr %src, i64 %n) { ; CHECK-LABEL: define i64 @test_vectorize_select_umin_last_idx( ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ splat (i64 100), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ splat (i64 100), %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[GEP]], i64 
2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 8 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp uge <2 x i64> [[VEC_PHI2]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp uge <2 x i64> [[VEC_PHI3]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP5]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> [[VEC_PHI2]], <2 x i64> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP6]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> [[VEC_PHI3]], <2 x i64> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP7]] = select <2 x i1> [[TMP3]], <2 x i64> [[VEC_IND]], <2 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[TMP8]] = select <2 x i1> [[TMP4]], <2 x i64> [[STEP_ADD]], <2 x i64> [[VEC_PHI1]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i64> [[STEP_ADD]], splat (i64 2) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> [[RDX_MINMAX]]) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP10]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <2 x i64> [[TMP5]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <2 x i64> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP11]], <2 x i64> [[TMP7]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[TMP14:%.*]] = select <2 x i1> [[TMP12]], <2 x i64> [[TMP8]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[RDX_MINMAX5:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[TMP13]], <2 x 
i64> [[TMP14]]) +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> [[RDX_MINMAX5]]) +; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP15]], -9223372036854775808 +; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP15]], i64 0 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX6:%.*]] = phi i64 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 100, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP]], align 8 +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ [[BC_MERGE_RDX6]], %[[SCALAR_PH]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV1]] +; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP1]], align 8 ; CHECK-NEXT: [[CMP:%.*]] = icmp uge i64 [[MIN_VAL]], [[L]] ; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i64 @llvm.umin.i64(i64 [[MIN_VAL]], i64 [[L]]) -; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV]], i64 [[MIN_IDX]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = 
select i1 [[CMP]], i64 [[IV1]], i64 [[MIN_IDX]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RES]] ; entry: @@ -70,15 +117,15 @@ entry: loop: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] %min.idx = phi i64 [ 0, %entry ], [ %min.idx.next, %loop ] - %min.val = phi i64 [ 0, %entry ], [ %min.val.next, %loop ] + %min.val = phi i64 [ 100, %entry ], [ %min.val.next, %loop ] %gep = getelementptr i64, ptr %src, i64 %iv %l = load i64, ptr %gep %cmp = icmp uge i64 %min.val, %l %min.val.next = tail call i64 @llvm.umin.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -119,8 +166,8 @@ loop: %min.val.next = tail call i64 @llvm.smin.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -131,21 +178,68 @@ define i64 @test_vectorize_select_smin_last_idx(ptr %src, i64 %n) { ; CHECK-LABEL: define i64 @test_vectorize_select_smin_last_idx( ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; 
CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[GEP]], i64 2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 8 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i64> [[VEC_PHI2]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp sge <2 x i64> [[VEC_PHI3]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP5]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> [[VEC_PHI2]], <2 x i64> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP6]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> [[VEC_PHI3]], <2 x i64> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP7]] = select <2 x i1> [[TMP3]], <2 x i64> [[VEC_IND]], <2 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[TMP8]] = select <2 x i1> [[TMP4]], <2 x i64> [[STEP_ADD]], <2 x i64> [[VEC_PHI1]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 
4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i64> [[STEP_ADD]], splat (i64 2) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> [[RDX_MINMAX]]) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP10]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <2 x i64> [[TMP5]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <2 x i64> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP11]], <2 x i64> [[TMP7]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[TMP14:%.*]] = select <2 x i1> [[TMP12]], <2 x i64> [[TMP8]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[RDX_MINMAX5:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[TMP13]], <2 x i64> [[TMP14]]) +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> [[RDX_MINMAX5]]) +; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP15]], -9223372036854775808 +; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP15]], i64 0 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX6:%.*]] = phi i64 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label 
%[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP]], align 8 +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ [[BC_MERGE_RDX6]], %[[SCALAR_PH]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV1]] +; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP1]], align 8 ; CHECK-NEXT: [[CMP:%.*]] = icmp sge i64 [[MIN_VAL]], [[L]] ; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i64 @llvm.smin.i64(i64 [[MIN_VAL]], i64 [[L]]) -; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV]], i64 [[MIN_IDX]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV1]], i64 [[MIN_IDX]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RES]] ; entry: @@ -161,8 +255,8 @@ loop: %min.val.next = tail call i64 @llvm.smin.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - 
%exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -203,8 +297,8 @@ loop: %min.val.next = tail call i64 @llvm.umax.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -215,21 +309,68 @@ define i64 @test_vectorize_select_umax_last_idx(ptr %src, i64 %n) { ; CHECK-LABEL: define i64 @test_vectorize_select_umax_last_idx( ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; 
CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[GEP]], i64 2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 8 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <2 x i64> [[VEC_PHI2]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_PHI3]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP5]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> [[VEC_PHI2]], <2 x i64> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP6]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> [[VEC_PHI3]], <2 x i64> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP7]] = select <2 x i1> [[TMP3]], <2 x i64> [[VEC_IND]], <2 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[TMP8]] = select <2 x i1> [[TMP4]], <2 x i64> [[STEP_ADD]], <2 x i64> [[VEC_PHI1]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i64> [[STEP_ADD]], splat (i64 2) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> [[RDX_MINMAX]]) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP10]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <2 x i64> [[TMP5]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <2 x i64> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP11]], <2 x i64> [[TMP7]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[TMP14:%.*]] = select <2 x i1> [[TMP12]], <2 x i64> [[TMP8]], <2 x 
i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[RDX_MINMAX5:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[TMP13]], <2 x i64> [[TMP14]]) +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> [[RDX_MINMAX5]]) +; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP15]], -9223372036854775808 +; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP15]], i64 0 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX6:%.*]] = phi i64 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP]], align 8 +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ [[BC_MERGE_RDX6]], %[[SCALAR_PH]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV1]] +; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP1]], align 8 ; CHECK-NEXT: [[CMP:%.*]] = icmp ule i64 [[MIN_VAL]], [[L]] ; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i64 @llvm.umax.i64(i64 [[MIN_VAL]], i64 [[L]]) -; CHECK-NEXT: [[MIN_IDX_NEXT]] = 
select i1 [[CMP]], i64 [[IV]], i64 [[MIN_IDX]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV1]], i64 [[MIN_IDX]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RES]] ; entry: @@ -245,8 +386,8 @@ loop: %min.val.next = tail call i64 @llvm.umax.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -287,8 +428,8 @@ loop: %min.val.next = tail call i64 @llvm.smax.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi i64 [ %min.idx.next, %loop ] @@ -299,21 +440,68 @@ define i64 @test_vectorize_select_smax_last_idx(ptr %src, i64 %n) { ; CHECK-LABEL: define i64 @test_vectorize_select_smax_last_idx( ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 
[[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ splat (i64 -9223372036854775808), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[GEP]], i64 2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 8 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sle <2 x i64> [[VEC_PHI2]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp sle <2 x i64> [[VEC_PHI3]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP5]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[VEC_PHI2]], <2 x i64> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP6]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[VEC_PHI3]], <2 x i64> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP7]] = select <2 x i1> [[TMP3]], <2 x i64> [[VEC_IND]], <2 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[TMP8]] = select <2 x i1> [[TMP4]], <2 x i64> [[STEP_ADD]], <2 x i64> [[VEC_PHI1]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <2 x i64> [[STEP_ADD]], splat (i64 2) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP9]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> [[RDX_MINMAX]]) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP10]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <2 x i64> [[TMP5]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <2 x i64> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP11]], <2 x i64> [[TMP7]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[TMP14:%.*]] = select <2 x i1> [[TMP12]], <2 x i64> [[TMP8]], <2 x i64> splat (i64 -9223372036854775808) +; CHECK-NEXT: [[RDX_MINMAX5:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[TMP13]], <2 x i64> [[TMP14]]) +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> [[RDX_MINMAX5]]) +; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP15]], -9223372036854775808 +; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP15]], i64 0 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX6:%.*]] = phi i64 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 
[[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP]], align 8 +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ [[BC_MERGE_RDX6]], %[[SCALAR_PH]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV1]] +; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP1]], align 8 ; CHECK-NEXT: [[CMP:%.*]] = icmp sle i64 [[MIN_VAL]], [[L]] ; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i64 @llvm.smax.i64(i64 [[MIN_VAL]], i64 [[L]]) -; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV]], i64 [[MIN_IDX]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[CMP]], i64 [[IV1]], i64 [[MIN_IDX]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[MIN_IDX_NEXT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RES]] ; entry: @@ -329,10 +517,154 @@ loop: %min.val.next = tail call i64 @llvm.smax.i64(i64 %min.val, i64 %l) %min.idx.next = select i1 %cmp, i64 %iv, i64 %min.idx %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %n - br i1 %exitcond.not, label %exit, label %loop + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop exit: %res = phi 
i64 [ %min.idx.next, %loop ] ret i64 %res } + +define i32 @test_select_no_iv_operand_optsize(ptr %s) #0 { +; CHECK-LABEL: define i32 @test_select_no_iv_operand_optsize( +; CHECK-SAME: ptr [[S:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[C_0:%.*]] = icmp eq i64 [[MIN_VAL]], 0 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[C_0]], i32 [[MIN_IDX]], i32 0 +; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i64 @llvm.umin.i64(i64 [[IV]], i64 [[MIN_VAL]]) +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 19 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MIN_IDX_NEXT_LCSSA:%.*]] = phi i32 [ [[MIN_IDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[MIN_IDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min.val = phi i64 [ 0, %entry ], [ %min.val.next, %loop ] + %min.idx = phi i32 [ 0, %entry ], [ %min.idx.next, %loop ] + %c.0 = icmp eq i64 %min.val, 0 + %min.idx.next = select i1 %c.0, i32 %min.idx, i32 0 + %min.val.next = tail call i64 @llvm.umin.i64(i64 %iv, i64 %min.val) + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 19 + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %min.idx.next +} + +; The reduction phi is used in a comparison that feeds a select with a truncated IV. 
+define i32 @test_multi_use_reduction_with_trunc_iv(ptr %src, i32 %n) { +; CHECK-LABEL: define i32 @test_multi_use_reduction_with_trunc_iv( +; CHECK-SAME: ptr [[SRC:%.*]], i32 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[PRE:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: br i1 [[PRE]], label %[[EXIT:.*]], label %[[LOOP_PREHEADER:.*]] +; CHECK: [[LOOP_PREHEADER]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 1, [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; CHECK-NEXT: [[IV:%.*]] = add i64 1, [[INDEX]] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[GEP_SRC]], i64 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[GEP_SRC]], align 4 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = 
icmp ugt <4 x i32> [[WIDE_LOAD]], [[VEC_PHI2]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt <4 x i32> [[WIDE_LOAD4]], [[VEC_PHI3]] +; CHECK-NEXT: [[TMP5]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI2]]) +; CHECK-NEXT: [[TMP6]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> [[WIDE_LOAD4]], <4 x i32> [[VEC_PHI3]]) +; CHECK-NEXT: [[TMP7]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[VEC_IND]] +; CHECK-NEXT: [[TMP8]] = select <4 x i1> [[TMP4]], <4 x i32> [[VEC_PHI1]], <4 x i32> [[STEP_ADD]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD]], splat (i32 4) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> [[TMP5]], <4 x i32> [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[RDX_MINMAX]]) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP10]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <4 x i32> [[TMP5]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP7]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = select <4 x i1> [[TMP12]], <4 x i32> [[TMP8]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[RDX_MINMAX5:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> [[TMP13]], <4 x i32> [[TMP14]]) +; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[RDX_MINMAX5]]) +; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: [[RDX_SELECT:%.*]] = 
select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 0 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 1, %[[LOOP_PREHEADER]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] +; CHECK-NEXT: [[BC_MERGE_RDX6:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[MIN_IDX:%.*]] = phi i32 [ [[MIN_IDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[MIN_VAL:%.*]] = phi i32 [ [[MIN_VAL_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX6]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV1]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC1]], align 4 +; CHECK-NEXT: [[C_0:%.*]] = icmp ugt i32 [[L]], [[MIN_VAL]] +; CHECK-NEXT: [[MIN_VAL_NEXT]] = tail call i32 @llvm.umin.i32(i32 [[L]], i32 [[MIN_VAL]]) +; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 +; CHECK-NEXT: [[MIN_IDX_NEXT]] = select i1 [[C_0]], i32 [[MIN_IDX]], i32 [[IV_TRUNC]] +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV1]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[N_EXT]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK: [[EXIT_LOOPEXIT]]: +; CHECK-NEXT: [[MIN_IDX_NEXT_LCSSA:%.*]] = phi i32 [ [[MIN_IDX_NEXT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[MIN_IDX_NEXT_LCSSA]], %[[EXIT_LOOPEXIT]] ] +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + %n.ext = zext i32 %n to i64 + %pre = icmp eq i32 %n, 0 
+ br i1 %pre, label %exit, label %loop + +loop: + %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ] + %min.idx = phi i32 [ 0, %entry ], [ %min.idx.next, %loop ] + %min.val = phi i32 [ 0, %entry ], [ %min.val.next, %loop ] + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32, ptr %gep.src, align 4 + %c.0 = icmp ugt i32 %l, %min.val + %min.val.next = tail call i32 @llvm.umin.i32(i32 %l, i32 %min.val) + %iv.trunc = trunc i64 %iv to i32 + %min.idx.next = select i1 %c.0, i32 %min.idx, i32 %iv.trunc + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, %n.ext + br i1 %ec, label %exit, label %loop + +exit: + %res = phi i32 [ 0, %entry ], [ %min.idx.next, %loop ] + ret i32 %res +} + +declare i32 @llvm.umin.i32(i32, i32) + +attributes #0 = { optsize "target-cpu"="neoverse-v2" } diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index 1596b60f48567..365ac6d27fcb0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -9,15 +9,15 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-LABEL: define void @cost_store_i8( ; DEFAULT-SAME: ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] { ; DEFAULT-NEXT: iter.check: +; DEFAULT-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP10]], 2 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 101, [[TMP13]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; DEFAULT: vector.main.loop.iter.check: ; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2 +; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 5 ; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 101, [[TMP1]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label 
[[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; DEFAULT: vector.main.loop.iter.check: -; DEFAULT-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 5 -; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 101, [[TMP10]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: ; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; DEFAULT-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 @@ -39,22 +39,22 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 101, [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP1]] +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP13]] ; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; DEFAULT: vec.epilog.ph: ; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; DEFAULT-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 4 -; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP15]] +; DEFAULT-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP12]] ; DEFAULT-NEXT: [[N_VEC3:%.*]] = sub i64 101, [[N_MOD_VF2]] ; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; DEFAULT: vec.epilog.vector.body: -; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP19:%.*]] = 
getelementptr i8, ptr [[DST]], i64 [[INDEX5]] -; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP19]], align 1 -; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP15]] -; DEFAULT-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]] -; DEFAULT-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; DEFAULT-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX1]] +; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP9]], align 1 +; DEFAULT-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP12]] +; DEFAULT-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC3]] +; DEFAULT-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: vec.epilog.middle.block: ; DEFAULT-NEXT: [[CMP_N6:%.*]] = icmp eq i64 101, [[N_VEC3]] ; DEFAULT-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -128,35 +128,46 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; DEFAULT-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; DEFAULT: vector.main.loop.iter.check: -; DEFAULT-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 5 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1000, [[TMP1]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[X]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i16> 
[[BROADCAST_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6:![0-9]+]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i64> poison, i64 [[TMP1]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT2]], <16 x i64> poison, <16 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP2:%.*]] = trunc <16 x i64> [[BROADCAST_SPLAT3]] to <16 x i8> -; DEFAULT-NEXT: [[TMP0:%.*]] = trunc <16 x i16> [[BROADCAST_SPLAT]] to <16 x i8> -; DEFAULT-NEXT: [[TMP3:%.*]] = and <16 x i8> [[TMP2]], [[TMP0]] +; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 +; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]] +; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP5:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP5]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP6:%.*]] = trunc [[BROADCAST_SPLAT3]] to +; DEFAULT-NEXT: [[TMP13:%.*]] = trunc [[BROADCAST_SPLAT]] to +; DEFAULT-NEXT: [[TMP14:%.*]] = and [[TMP6]], [[TMP13]] ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]] ; DEFAULT: vector.body: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 16 -; DEFAULT-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP4]], align 1, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] -; 
DEFAULT-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP5]], align 1, !alias.scope [[META9]], !noalias [[META6]] -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992 -; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; DEFAULT-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 +; DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP16]] +; DEFAULT-NEXT: store [[TMP14]], ptr [[TMP4]], align 1, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] +; DEFAULT-NEXT: store [[TMP14]], ptr [[TMP17]], align 1, !alias.scope [[META9]], !noalias [[META6]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; DEFAULT-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; DEFAULT: middle.block: -; DEFAULT-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, [[N_VEC]] +; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF12:![0-9]+]] +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF12:![0-9]+]] ; DEFAULT: vec.epilog.ph: -; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <8 x i16> 
poison, i16 [[X]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT4]], <8 x i16> poison, <8 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP8:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6]] +; DEFAULT-NEXT: [[TMP8:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META13:![0-9]+]] ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <8 x i64> poison, i64 [[TMP8]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT7]], <8 x i64> poison, <8 x i32> zeroinitializer ; DEFAULT-NEXT: [[TMP9:%.*]] = trunc <8 x i64> [[BROADCAST_SPLAT8]] to <8 x i8> @@ -165,15 +176,15 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; DEFAULT: vec.epilog.vector.body: ; DEFAULT-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX6]] -; DEFAULT-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP13]], align 1, !alias.scope [[META9]], !noalias [[META6]] +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX6]] +; DEFAULT-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP11]], align 1, !alias.scope [[META16:![0-9]+]], !noalias [[META13]] ; DEFAULT-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 8 ; DEFAULT-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT9]], 1000 -; DEFAULT-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; DEFAULT: vec.epilog.middle.block: ; DEFAULT-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; DEFAULT: vec.epilog.scalar.ph: -; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[VEC_EPILOG_MIDDLE_BLOCK]] 
], [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] ; DEFAULT-NEXT: br label [[LOOP:%.*]] ; DEFAULT: loop: ; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] @@ -185,7 +196,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: store i8 [[TRUNC]], ptr [[GEP]], align 1 ; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -202,25 +213,25 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; PRED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; PRED: vector.ph: ; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1000) -; PRED-NEXT: [[TMP2:%.*]] = trunc [[BROADCAST_SPLAT]] to +; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1000) +; PRED-NEXT: [[TMP2:%.*]] = trunc [[BROADCAST_SPLAT]] to ; PRED-NEXT: br 
label [[VECTOR_BODY:%.*]] ; PRED: vector.body: ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; PRED-NEXT: [[TMP3:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META3:![0-9]+]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP3]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer -; PRED-NEXT: [[TMP4:%.*]] = trunc [[BROADCAST_SPLAT3]] to -; PRED-NEXT: [[TMP5:%.*]] = and [[TMP4]], [[TMP2]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP3]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; PRED-NEXT: [[TMP4:%.*]] = trunc [[BROADCAST_SPLAT3]] to +; PRED-NEXT: [[TMP5:%.*]] = and [[TMP4]], [[TMP2]] ; PRED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; PRED-NEXT: call void @llvm.masked.store.nxv2i8.p0( [[TMP5]], ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0( [[TMP5]], ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] ; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1000) -; PRED-NEXT: [[TMP7:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1000) +; PRED-NEXT: [[TMP7:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; PRED-NEXT: [[TMP8:%.*]] 
= xor i1 [[TMP7]], true ; PRED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; PRED: middle.block: @@ -278,8 +289,13 @@ attributes #1 = { vscale_range(1,16) "target-features"="+sve" } ; DEFAULT: [[META10]] = distinct !{[[META10]], [[META8]]} ; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} ; DEFAULT: [[PROF12]] = !{!"branch_weights", i32 8, i32 24} -; DEFAULT: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]} +; DEFAULT: [[META13]] = !{[[META14:![0-9]+]]} +; DEFAULT: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]} +; DEFAULT: [[META15]] = distinct !{[[META15]], !"LVerDomain"} +; DEFAULT: [[META16]] = !{[[META17:![0-9]+]]} +; DEFAULT: [[META17]] = distinct !{[[META17]], [[META15]]} +; DEFAULT: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]} ;. ; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll index 76a7536501bd6..389f91f878534 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll @@ -29,17 +29,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA510: [[VECTOR_BODY]]: ; CHECK-CA510-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA510-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4 +; CHECK-CA510-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i64 4 ; CHECK-CA510-NEXT: 
[[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA510-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i32 4 +; CHECK-CA510-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP6]], align 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; CHECK-CA510-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; CHECK-CA510-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA510-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i32 4 +; CHECK-CA510-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 4 ; CHECK-CA510-NEXT: store <4 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-CA510-NEXT: store <4 x float> [[TMP10]], ptr [[TMP13]], align 4 ; CHECK-CA510-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP2]], 8 @@ -93,17 +93,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA520: [[VECTOR_BODY]]: ; CHECK-CA520-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA520-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP2]] -; CHECK-CA520-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4 +; CHECK-CA520-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i64 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA520-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP2]] -; CHECK-CA520-NEXT: 
[[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i32 4 +; CHECK-CA520-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP6]], align 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; CHECK-CA520-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; CHECK-CA520-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA520-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[TMP2]] -; CHECK-CA520-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i32 4 +; CHECK-CA520-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 4 ; CHECK-CA520-NEXT: store <4 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-CA520-NEXT: store <4 x float> [[TMP10]], ptr [[TMP13]], align 4 ; CHECK-CA520-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP2]], 8 @@ -157,17 +157,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA320: [[VECTOR_BODY]]: ; CHECK-CA320-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA320-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA320-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i32 4 +; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i64 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; 
CHECK-CA320-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA320-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; CHECK-CA320-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA320-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4 +; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 4 ; CHECK-CA320-NEXT: store <4 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-CA320-NEXT: store <4 x float> [[TMP7]], ptr [[TMP9]], align 4 ; CHECK-CA320-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll index 871d9be609bd7..873b18beb85aa 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll @@ -238,31 +238,37 @@ for.exit: define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 { ; CHECK-LABEL: define void @histogram_8bit( ; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: entry: +; CHECK-NEXT: iter.check: ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 2 +; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 3 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP9]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP2]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP6]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] 
; CHECK: vector.ph: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 -; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[DOTNOT:%.*]] = add nsw i64 [[TMP4]], -1 ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] +; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], [[TMP6]] -; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i8( [[TMP7]], i8 1, splat (i1 true)) +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], [[TMP8]] +; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8( [[TMP17]], i8 1, splat (i1 true)) ; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC1]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: +; CHECK-NEXT: [[CMP_N1:%.*]] = 
icmp eq i64 [[N_VEC]], 0 +; CHECK-NEXT: br i1 [[CMP_N1]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK: vec.epilog.iter.check: ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll index 2a19402347e40..6eb8242bf7975 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll @@ -178,9 +178,9 @@ define void @test_interleave_store_one_constant(ptr noalias %src, ptr noalias %d ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP13]], i32 2 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP13]], i32 4 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP13]], i32 6 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP13]], i64 2 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP13]], i64 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP13]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP13]], align 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP15]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP16]], align 8 @@ -323,9 +323,9 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr ; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 2 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr 
[[TMP23]], i32 4 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 6 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 2 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP23]], align 8 ; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x double>, ptr [[TMP25]], align 8 ; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x double>, ptr [[TMP26]], align 8 @@ -456,8 +456,9 @@ define void @test_interleave_after_narrowing(i32 %n, ptr %x, ptr noalias %y) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br [[EXIT:label %.*]] -; CHECK: [[SCALAR_PH:.*:]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll index 46b0ebdd2fa62..99c735f777b66 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll @@ -88,7 +88,7 @@ define void @load_store_interleave_group_block_invar_cond(ptr noalias %data, ptr ; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE11]] ; VF2IC2: [[PRED_STORE_CONTINUE11]]: ; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST_1]], i64 [[INDEX]] -; VF2IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 2 +; VF2IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 2 ; VF2IC2-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP5]], align 1 ; 
VF2IC2-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP6]], align 1 ; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -199,7 +199,7 @@ define void @load_store_interleave_group_block_var_cond(ptr noalias %data, ptr % ; VF2IC2-NEXT: [[INTERLEAVED_VEC5:%.*]] = shufflevector <4 x i64> [[TMP6]], <4 x i64> poison, <4 x i32> ; VF2IC2-NEXT: store <4 x i64> [[INTERLEAVED_VEC5]], ptr [[TMP4]], align 8 ; VF2IC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[MASKS]], i64 [[INDEX]] -; VF2IC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 2 +; VF2IC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 2 ; VF2IC2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1 ; VF2IC2-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i8>, ptr [[TMP8]], align 1 ; VF2IC2-NEXT: [[TMP9:%.*]] = icmp eq <2 x i8> [[WIDE_LOAD]], zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll index b63e03dccdc18..d82dace2c9e04 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll @@ -172,14 +172,13 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) ; ; CHECK-LABEL: define void @test_masked_interleave_group( ; CHECK-SAME: i32 [[N:%.*]], ptr [[MASK:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP3]], i64 8) +; CHECK-NEXT: [[UMAX:%.*]] = shl nuw i64 [[TMP2]], 3 ; 
CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[UMAX]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4 @@ -195,45 +194,129 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) ; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]] ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] -; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK6:%.*]] = icmp ult i64 [[TMP1]], [[TMP20]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP21]], 16 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP9]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[INDEX1]], 16 +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP25]] +; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[INDEX1]], 16 +; CHECK-NEXT: [[NEXT_GEP9:%.*]] 
= getelementptr i8, ptr [[SRC]], i64 [[TMP26]] +; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX1]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP10]], align 1, !alias.scope [[META6:![0-9]+]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp eq [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call @llvm.vector.interleave4.nxv64i1( [[TMP27]], [[TMP27]], [[TMP27]], [[TMP27]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call @llvm.masked.load.nxv64f32.p0(ptr align 4 [[NEXT_GEP9]], [[INTERLEAVED_MASK]], poison), !alias.scope [[META9:![0-9]+]] +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv64f32( [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP28:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 3 +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave4.nxv64f32( [[TMP28]], [[TMP16]], [[TMP17]], [[TMP18]]) +; CHECK-NEXT: [[INTERLEAVED_MASK9:%.*]] = call @llvm.vector.interleave4.nxv64i1( [[TMP27]], [[TMP27]], [[TMP27]], [[TMP27]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv64f32.p0( [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP1]], [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP9]] +; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-NEXT: [[TMP11:%.*]] = mul i64 
[[N_VEC]], 16 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 16 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[N_VEC]] -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[UMAX]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 8 +; CHECK-NEXT: [[N_MOD_VF10:%.*]] = urem i64 [[TMP1]], [[TMP23]] +; CHECK-NEXT: [[INDEX:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF10]] +; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX6:%.*]] = mul i64 [[INDEX]], 16 ; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX6]] ; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP8]], align 1, !alias.scope [[META6:![0-9]+]] -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call @llvm.vector.interleave4.nxv16i1( [[TMP16]], [[TMP16]], [[TMP16]], [[TMP16]]) -; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call @llvm.masked.load.nxv16f32.p0(ptr align 4 [[NEXT_GEP7]], [[INTERLEAVED_MASK]], poison), !alias.scope [[META9:![0-9]+]] -; CHECK-NEXT: 
[[STRIDED_VEC:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv16f32( [[WIDE_MASKED_VEC]]) -; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 0 -; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 2 -; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 3 -; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave4.nxv16f32( [[TMP17]], [[TMP18]], [[TMP19]], [[TMP20]]) -; CHECK-NEXT: [[INTERLEAVED_MASK9:%.*]] = call @llvm.vector.interleave4.nxv16i1( [[TMP16]], [[TMP16]], [[TMP16]], [[TMP16]]) -; CHECK-NEXT: call void @llvm.masked.store.nxv16f32.p0( [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP]], [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT23:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX12]], 16 +; CHECK-NEXT: [[NEXT_GEP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX1]] +; CHECK-NEXT: [[OFFSET_IDX14:%.*]] = mul i64 [[INDEX12]], 16 +; CHECK-NEXT: [[NEXT_GEP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX14]] +; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX12]] +; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load , ptr [[NEXT_GEP16]], align 1, !alias.scope [[META6]] +; CHECK-NEXT: 
[[TMP30:%.*]] = icmp eq [[WIDE_LOAD17]], zeroinitializer +; CHECK-NEXT: [[INTERLEAVED_MASK18:%.*]] = call @llvm.vector.interleave4.nxv32i1( [[TMP30]], [[TMP30]], [[TMP30]], [[TMP30]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC19:%.*]] = call @llvm.masked.load.nxv32f32.p0(ptr align 4 [[NEXT_GEP15]], [[INTERLEAVED_MASK18]], poison), !alias.scope [[META9]] +; CHECK-NEXT: [[STRIDED_VEC20:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv32f32( [[WIDE_MASKED_VEC19]]) +; CHECK-NEXT: [[TMP31:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 0 +; CHECK-NEXT: [[TMP32:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 1 +; CHECK-NEXT: [[TMP33:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 2 +; CHECK-NEXT: [[TMP34:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 3 +; CHECK-NEXT: [[INTERLEAVED_VEC21:%.*]] = call @llvm.vector.interleave4.nxv32f32( [[TMP31]], [[TMP32]], [[TMP33]], [[TMP34]]) +; CHECK-NEXT: [[INTERLEAVED_MASK22:%.*]] = call @llvm.vector.interleave4.nxv32i1( [[TMP30]], [[TMP30]], [[TMP30]], [[TMP30]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv32f32.p0( [[INTERLEAVED_VEC21]], ptr align 4 [[NEXT_GEP13]], [[INTERLEAVED_MASK22]]), !alias.scope [[META11]], !noalias [[META13]] +; CHECK-NEXT: [[INDEX_NEXT23]] = add nuw i64 [[INDEX12]], [[TMP23]] +; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT23]], [[INDEX]] +; CHECK-NEXT: br i1 [[TMP35]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N24:%.*]] = icmp eq i64 [[TMP1]], [[INDEX]] +; CHECK-NEXT: br i1 [[CMP_N24]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP10]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL25:%.*]] = phi ptr [ [[NEXT_GEP]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP12]], 
%[[VEC_EPILOG_ITER_CHECK]] ], [ [[DST]], %[[VECTOR_MEMCHECK]] ], [ [[DST]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL26:%.*]] = phi ptr [ [[NEXT_GEP7]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP14]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[SRC]], %[[VECTOR_MEMCHECK]] ], [ [[SRC]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL27:%.*]] = phi ptr [ [[NEXT_GEP8]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP15]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[MASK]], %[[VECTOR_MEMCHECK]] ], [ [[MASK]], %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[DST_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL25]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[DST_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[SRC_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL26]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[SRC_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[MASK_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL27]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[MASK_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[MASK_IV_NEXT]] = getelementptr i8, ptr [[MASK_IV]], i64 1 +; CHECK-NEXT: [[MASK_VAL:%.*]] = load i8, ptr [[MASK_IV]], align 1 +; CHECK-NEXT: [[SHOULD_COPY:%.*]] = icmp eq i8 [[MASK_VAL]], 0 +; CHECK-NEXT: br i1 [[SHOULD_COPY]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[ELEM0:%.*]] = load float, ptr [[SRC_IV]], align 4 +; CHECK-NEXT: store float [[ELEM0]], ptr [[DST_IV]], align 4 +; CHECK-NEXT: [[SRC_1_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], i64 4 +; CHECK-NEXT: [[S1:%.*]] = load float, ptr [[SRC_1_PTR]], align 4 +; CHECK-NEXT: [[DST_1_PTR:%.*]] = getelementptr i8, ptr [[DST_IV]], i64 4 +; CHECK-NEXT: store float [[S1]], ptr [[DST_1_PTR]], align 4 +; CHECK-NEXT: [[SRC_2_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], i64 8 +; CHECK-NEXT: [[S2:%.*]] = load float, ptr [[SRC_2_PTR]], align 4 +; CHECK-NEXT: [[DST_2_PTR:%.*]] = getelementptr i8, 
ptr [[DST_IV]], i64 8 +; CHECK-NEXT: store float [[S2]], ptr [[DST_2_PTR]], align 4 +; CHECK-NEXT: [[SRC_3_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], i64 12 +; CHECK-NEXT: [[S3:%.*]] = load float, ptr [[SRC_3_PTR]], align 4 +; CHECK-NEXT: [[DST_3_PTR:%.*]] = getelementptr i8, ptr [[DST_IV]], i64 12 +; CHECK-NEXT: store float [[S3]], ptr [[DST_3_PTR]], align 4 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[SRC_IV_NEXT]] = getelementptr i8, ptr [[SRC_IV]], i64 16 +; CHECK-NEXT: [[DST_IV_NEXT]] = getelementptr i8, ptr [[DST_IV]], i64 16 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll index d290f2d4f5bc3..b14b1783c97e3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll @@ -62,7 +62,7 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8 ; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = shl nsw 
i64 [[INDEX]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll index 7bc606f5c61b3..0cfc14a0ae3a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll @@ -23,23 +23,27 @@ define void @zext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> -; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq 
i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], splat (i16 2) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store [[TMP9]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -99,23 +103,27 @@ define void @sext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i16> -; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], splat (i16 2) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store [[TMP9]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll index 44b4e5a8c2bc7..4ede21040f393 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll @@ -17,9 +17,9 @@ define void 
@test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i32 16 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i32 32 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i32 48 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 32 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 48 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[DST]], align 4 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP2]], align 4 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 4 @@ -30,7 +30,7 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF0:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] @@ -49,7 +49,7 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: store i8 0, ptr [[GEP]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: [[EXIT]]: ; 
CHECK-NEXT: ret void ; @@ -69,7 +69,8 @@ exit: ret void } ;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK: [[PROF0]] = !{!"branch_weights", i32 8, i32 56} +; CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]} +; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[META3]] = !{!"llvm.loop.isvectorized", i32 1} ;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll index 2abc787061b53..ec874d0b48030 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll @@ -11,14 +11,14 @@ define void @vector_reverse_f64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-LABEL: vector_reverse_f64 ; CHECK-LABEL: vector.body -; CHECK: %[[GEP:.*]] = getelementptr inbounds double, ptr %{{.*}}, i32 0 -; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds double, ptr %[[GEP]], i32 -7 +; CHECK: %[[GEP:.*]] = getelementptr inbounds double, ptr %{{.*}}, i64 0 +; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds double, ptr %[[GEP]], i64 -7 ; CHECK-NEXT: %[[WIDE:.*]] = load <8 x double>, ptr %[[GEP1]], align 8 ; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x double> %[[WIDE]], <8 x double> poison, <8 x i32> ; CHECK-NEXT: %[[FADD:.*]] = fadd <8 x double> %[[REVERSE]] ; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds double, ptr {{.*}}, i64 {{.*}} -; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds double, ptr %[[GEP2]], i32 0 -; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds double, ptr %[[GEP3]], i32 -7 +; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds double, ptr %[[GEP2]], i64 0 +; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds double, ptr %[[GEP3]], i64 -7 ; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x double> 
%[[FADD]], <8 x double> poison, <8 x i32> ; CHECK-NEXT: store <8 x double> %[[REVERSE6]], ptr %[[GEP4]], align 8 @@ -44,14 +44,14 @@ for.body: ; preds = %entry, %for.body define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-LABEL: vector_reverse_i64 ; CHECK-LABEL: vector.body -; CHECK: %[[GEP:.*]] = getelementptr inbounds i64, ptr %{{.*}}, i32 0 -; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds i64, ptr %[[GEP]], i32 -7 +; CHECK: %[[GEP:.*]] = getelementptr inbounds i64, ptr %{{.*}}, i64 0 +; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds i64, ptr %[[GEP]], i64 -7 ; CHECK-NEXT: %[[WIDE:.*]] = load <8 x i64>, ptr %[[GEP1]], align 8 ; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x i64> %[[WIDE]], <8 x i64> poison, <8 x i32> ; CHECK-NEXT: %[[FADD:.*]] = add <8 x i64> %[[REVERSE]] ; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds i64, ptr {{.*}}, i64 {{.*}} -; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds i64, ptr %[[GEP2]], i32 0 -; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds i64, ptr %[[GEP3]], i32 -7 +; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds i64, ptr %[[GEP2]], i64 0 +; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds i64, ptr %[[GEP3]], i64 -7 ; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x i64> %[[FADD]], <8 x i64> poison, <8 x i32> ; CHECK-NEXT: store <8 x i64> %[[REVERSE6]], ptr %[[GEP4]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll index 6df3f1b418eb6..a1d03c4a7fbc6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll @@ -80,8 +80,8 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) "target-features"="+neon,+do ; CHECK-NEXT: Successor(s): vector.body ; CHECK-EMPTY: ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] -; CHECK-NEXT: 
WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[RDX_START]]>, ir<%add> (VF scaled by 1/4) +; CHECK-NEXT: EMIT-SCALAR vp<[[EP_IV:%.+]]> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX:%.+]]> = phi vp<[[RDX_START]]>, ir<[[RDX_NEXT:%.+]]> (VF scaled by 1/4) ; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%index> ; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a> ; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%index> @@ -89,13 +89,13 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) "target-features"="+neon,+do ; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32 ; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32 ; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a> -; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<16> -; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<1024> +; CHECK-NEXT: PARTIAL-REDUCE ir<[[RDX_NEXT]]> = ir<[[RDX]]> + reduce.add (ir<%mul>) +; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16> +; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, ir<1024> ; CHECK-NEXT: Successor(s): middle.block, vector.body ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[RED_RESULT:%[0-9]+]]> = compute-reduction-result ir<%accum>, ir<%add> +; CHECK-NEXT: EMIT vp<[[RED_RESULT:%[0-9]+]]> = compute-reduction-result ir<[[RDX]]>, ir<[[RDX_NEXT]]> ; CHECK-NEXT: Successor(s): ir-bb ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/widen-gep-all-indices-invariant.ll b/llvm/test/Transforms/LoopVectorize/AArch64/widen-gep-all-indices-invariant.ll new file mode 100644 index 0000000000000..97cc6929e44d5 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/widen-gep-all-indices-invariant.ll @@ -0,0 +1,71 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; 
RUN: opt -passes=loop-vectorize -mtriple=arm64-apple-macosx -S %s | FileCheck %s + +; Test case for https://github.com/llvm/llvm-project/issues/169668. +define i32 @gep_with_all_invariant_operands(ptr %src.0, ptr %src.1, i64 %n, i1 %cond) #0 { +; CHECK-LABEL: define i32 @gep_with_all_invariant_operands( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]], i1 [[COND:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP0]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i64 [[TMP5]], i64 0 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP0]]) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[N]] +; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[COND]], ptr [[SRC_1]], ptr [[TMP8]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[TMP9]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( align 4 [[BROADCAST_SPLAT]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call 
@llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]]) +; CHECK-NEXT: [[TMP10:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-NEXT: [[TMP11:%.*]] = xor i1 [[TMP10]], true +; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP12:%.*]] = xor [[ACTIVE_LANE_MASK]], splat (i1 true) +; CHECK-NEXT: [[FIRST_INACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv4i1( [[TMP12]], i1 false) +; CHECK-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[FIRST_INACTIVE_LANE]], 1 +; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4 +; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 0 +; CHECK-NEXT: [[TMP16:%.*]] = extractelement [[WIDE_MASKED_GATHER]], i64 [[LAST_ACTIVE_LANE]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i32 [[TMP16]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] + %gep = getelementptr i32, ptr %src.0, i64 %n + %ptr = select i1 %cond, ptr %src.1, ptr %gep + %val = load i32, ptr %ptr, align 4 + %iv.next = add i64 %iv, 1 + %cmp = icmp ult i64 %iv, %n + br i1 %cmp, label %loop, label %exit, !llvm.loop !0 + +exit: + ret i32 %val +} + +attributes #0 = { "target-cpu"="neoverse-v2" } + +!0 = distinct !{!0, !1, !2} +!1 = !{!"llvm.loop.vectorize.enable", i1 true} +!2 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} +;. +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll index fc0b19da47f4b..82f272ad853a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll @@ -47,38 +47,37 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 { ; NARROW-NEXT: entry: ; NARROW-NEXT: br label [[VECTOR_PH:%.*]] ; NARROW: vector.ph: +; NARROW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NARROW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 4 +; NARROW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; NARROW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; NARROW-NEXT: br label [[VECTOR_BODY:%.*]] ; NARROW: vector.body: ; NARROW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; NARROW-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] -; NARROW-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8 -; NARROW-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[WIDE_LOAD]] to <2 x float> -; NARROW-NEXT: [[TMP2:%.*]] = extractelement <2 x float> [[TMP1]], i32 0 -; NARROW-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP1]], i32 1 -; NARROW-NEXT: [[TMP3:%.*]] = call float @foo(float [[TMP2]]) #[[ATTR1:[0-9]+]] -; NARROW-NEXT: [[TMP5:%.*]] = call float @foo(float [[TMP4]]) #[[ATTR1]] -; NARROW-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i32 0 -; NARROW-NEXT: [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[TMP5]], i32 1 +; NARROW-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 8 +; NARROW-NEXT: [[TMP3:%.*]] = fptrunc [[WIDE_LOAD]] to +; NARROW-NEXT: [[TMP4:%.*]] = call @foo_vector( [[TMP3]], splat (i1 true)) ; NARROW-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NARROW-NEXT: store <2 x float> [[TMP7]], ptr [[TMP8]], align 4 -; NARROW-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 -; NARROW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; NARROW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NARROW-NEXT: store [[TMP4]], ptr [[TMP8]], align 4 +; NARROW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; NARROW-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NARROW-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NARROW: middle.block: -; NARROW-NEXT: br label [[SCALAR_PH:%.*]] +; NARROW-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH:%.*]] ; NARROW: scalar.ph: ; NARROW-NEXT: br label [[FOR_BODY:%.*]] ; NARROW: for.body: -; NARROW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; NARROW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[N_VEC]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; NARROW-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[INDVARS_IV]] ; NARROW-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8 ; NARROW-NEXT: [[TRUNC:%.*]] = fptrunc double [[LOAD]] to float -; NARROW-NEXT: [[CALL:%.*]] = call float @foo(float [[TRUNC]]) #[[ATTR1]] +; NARROW-NEXT: [[CALL:%.*]] = call float @foo(float [[TRUNC]]) #[[ATTR2:[0-9]+]] ; NARROW-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] ; NARROW-NEXT: store float [[CALL]], ptr [[ARRAYIDX]], align 4 ; NARROW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; NARROW-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 -; NARROW-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NARROW-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; NARROW: for.cond.cleanup: ; NARROW-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll index 7afa8ce998121..e05332abcee61 100644 --- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll @@ -22,7 +22,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll index abbd176a1df6e..478c9c1141949 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll @@ -51,17 +51,17 @@ define i1 @select_exit_cond(ptr %start, ptr %end, i64 %N) { ; CHECK-NEXT: [[STEP_ADD_10:%.*]] = add <2 x i64> [[STEP_ADD_9]], splat (i64 2) ; CHECK-NEXT: [[STEP_ADD_11:%.*]] = add <2 x i64> [[STEP_ADD_10]], splat (i64 2) ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2 -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 4 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 6 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 8 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 10 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr 
i8, ptr [[NEXT_GEP]], i32 12 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 14 -; CHECK-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 18 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 20 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 22 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 2 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 4 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 6 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 8 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 10 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 12 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 14 +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 18 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 20 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 22 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x i8>, ptr [[TMP12]], align 1 ; CHECK-NEXT: [[WIDE_LOAD26:%.*]] = load <2 x i8>, ptr [[TMP13]], align 1 @@ -193,6 +193,7 @@ define i1 @select_exit_cond(ptr %start, ptr %end, i64 %N) { ; CHECK-NEXT: [[CMP_I166_I:%.*]] = icmp ult ptr [[PTR_IV]], [[END]] ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[IV]], [[N]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[CMP_I166_I]], i1 [[CMP2]], i1 false +; CHECK-NEXT: br i1 [[AND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP52]], %[[MIDDLE_BLOCK]] ], [ [[TMP55]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: 
[[RES:%.*]] = icmp eq i64 [[RED_NEXT_LCSSA]], 0 @@ -226,4 +227,6 @@ exit: ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[PROF3]] = !{!"branch_weights", i32 2, i32 22} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll index 7677c9666455a..f1fbf1dd5d942 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll @@ -22,13 +22,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK: [[VECTOR_BODY]]: ; VF-TWO-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; VF-TWO-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 12 +; VF-TWO-CHECK-NEXT: 
[[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 28 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 @@ -38,13 +38,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP23]], align 4 ; VF-TWO-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr 
[[TMP24]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 28 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP33]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 @@ -62,13 +62,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] ; VF-TWO-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] ; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 24 +; 
VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 28 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP48]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 @@ -124,13 +124,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK: [[VECTOR_BODY]]: ; VF-FOUR-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; VF-FOUR-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 28 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; 
VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 @@ -140,13 +140,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP23]], align 4 ; VF-FOUR-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 28 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP33]], align 4 ; VF-FOUR-CHECK-NEXT: 
[[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 @@ -164,13 +164,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] ; VF-FOUR-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] ; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 28 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP48]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 @@ -261,22 +261,22 @@ define void @f2(ptr noalias %A, ptr noalias 
%B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP32:%.*]] = add i32 [[TMP24]], [[N]] ; VF-TWO-CHECK-NEXT: [[TMP40:%.*]] = sext i32 [[TMP32]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] -; VF-TWO-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP58]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 0 +; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -4 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr 
inbounds float, ptr [[TMP14]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -8 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -12 +; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -16 +; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -20 +; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -24 +; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP25]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -28 +; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 -3 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 @@ -302,13 +302,13 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], splat (float 1.000000e+00) ; VF-TWO-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], splat (float 1.000000e+00) ; VF-TWO-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = 
getelementptr inbounds float, ptr [[TMP80]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 28 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP80]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 @@ -340,8 +340,8 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] ; VF-TWO-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] -; VF-TWO-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 0 -; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -1 +; VF-TWO-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i64 0 +; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP50]], i64 -1 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <2 x float>, ptr [[TMP104]], 
align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <2 x float> [[WIDE_LOAD23]], <2 x float> poison, <2 x i32> ; VF-TWO-CHECK-NEXT: [[TMP105:%.*]] = fadd fast <2 x float> [[REVERSE24]], splat (float 1.000000e+00) @@ -384,22 +384,22 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP32:%.*]] = add i32 [[TMP24]], [[N]] ; VF-FOUR-CHECK-NEXT: [[TMP40:%.*]] = sext i32 [[TMP32]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] -; VF-FOUR-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP58]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 +; 
VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 0 +; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -4 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -8 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -12 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -16 +; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -20 +; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -24 +; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP25]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -28 +; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 -3 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 @@ -425,13 +425,13 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], splat (float 1.000000e+00) ; VF-FOUR-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], splat 
(float 1.000000e+00) ; VF-FOUR-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 28 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP80]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 @@ -463,8 +463,8 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] ; VF-FOUR-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] -; VF-FOUR-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 
0 -; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i64 0 +; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP50]], i64 -3 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <4 x float>, ptr [[TMP104]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <4 x float> [[WIDE_LOAD23]], <4 x float> poison, <4 x i32> ; VF-FOUR-CHECK-NEXT: [[TMP105:%.*]] = fadd fast <4 x float> [[REVERSE24]], splat (float 1.000000e+00) diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll index d82a3cde4639a..dc9c154b3fe05 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll @@ -34,13 +34,13 @@ define void @test(ptr %arr, i32 %len) { ; CHECK-NEXT: [[VEC_PHI7:%.*]] = phi <2 x double> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI8:%.*]] = phi <2 x double> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[ARR]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 4 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 6 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 8 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 10 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 12 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 14 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds 
double, ptr [[TMP3]], i64 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 6 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 8 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 10 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 12 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 14 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP3]], align 8 ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x double>, ptr [[TMP6]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index 9f6f79d9030ed..9daf4236982bd 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -69,51 +69,48 @@ exit: define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-LABEL: define i8 @dead_live_out_due_to_scalar_epilogue_required( ; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP1]], i32 6) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 1005 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 1005 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] ; CHECK-NEXT: 
[[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] -; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 252, [[TMP4]] -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 252, [[TMP6]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i32 [[N_VEC]], 4 -; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.stepvector.nxv4i32() -; CHECK-NEXT: [[TMP11:%.*]] = mul [[TMP9]], splat (i32 4) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP11]] -; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP4]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i32 [[TMP14]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv16i32() +; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i32 4) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP15:%.*]] = sext [[VEC_IND]] to -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], [[TMP15]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0( zeroinitializer, align 1 [[TMP16]], splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META6:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: 
[[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 252, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true) +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 4, [[TMP2]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = sext [[VEC_IND]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP9]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv16i8.nxv16p0( align 1 [[TMP6]], splat (i1 true), i32 [[TMP2]]), !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], [[TMP9]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0( zeroinitializer, align 1 [[TMP7]], splat (i1 true), i32 [[TMP2]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP2]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement [[WIDE_MASKED_GATHER]], i64 [[TMP11]] +; 
CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IV]] to i64 ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IDXPROM]] ; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1 @@ -121,9 +118,9 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ] +; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i8 [[R]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index 8e71718061c9b..e4ba6fe9d757d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -639,73 +639,46 @@ for.end: define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-LABEL: @udiv_sdiv_with_invariant_divisors( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 12, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; 
CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 12, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 12, [[N_MOD_VF]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i8 [[X:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16 -; CHECK-NEXT: [[TMP4:%.*]] = add i16 -12, [[DOTCAST]] -; CHECK-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: [[TMP5:%.*]] = add i8 -12, [[DOTCAST5]] -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], splat (i8 1), [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[C]], splat (i16 1), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.stepvector.nxv2i8() -; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], splat (i8 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP3]] to i8 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, i8 [[TMP10]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i1 [[C:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = xor [[BROADCAST_SPLAT]], splat (i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i8 [[X:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: 
[[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv8i8() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i8 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = udiv [[VEC_IND]], [[TMP6]] -; CHECK-NEXT: [[TMP12:%.*]] = zext [[TMP11]] to -; CHECK-NEXT: [[TMP13:%.*]] = sdiv [[TMP12]], [[TMP7]] -; CHECK-NEXT: [[TMP14:%.*]] = sext [[TMP13]] to -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP14]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT7]] -; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 12, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i8 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer +; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vp.merge.nxv8i8( [[TMP0]], [[BROADCAST_SPLAT2]], splat (i8 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP9:%.*]] = udiv [[VEC_IND]], [[TMP8]] 
+; CHECK-NEXT: [[TMP10:%.*]] = zext [[TMP9]] to +; CHECK-NEXT: [[TMP11:%.*]] = call @llvm.vp.merge.nxv8i16( [[TMP0]], [[BROADCAST_SPLAT4]], splat (i16 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP12:%.*]] = sdiv [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = sext [[TMP12]] to +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP13]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP3]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2 -; CHECK-NEXT: [[TMP18:%.*]] = sub i32 [[TMP17]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = extractelement [[PREDPHI]], i32 [[TMP18]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 12, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i8 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY]] ] -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[NARROW_IV:%.*]] = phi i8 [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ] -; CHECK-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; CHECK: then: -; CHECK-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]] -; CHECK-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16 -; CHECK-NEXT: [[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]] -; CHECK-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] 
], [ [[SD_EXT]], [[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP3]] to i64 +; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 8 +; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = extractelement [[PREDPHI]], i64 [[TMP17]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MERGE_LCSSA]] ; ; FIXED-LABEL: @udiv_sdiv_with_invariant_divisors( diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll index 7eb3d7fc5a36d..e35db479dc963 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll @@ -7,55 +7,49 @@ target triple = "riscv64-unknown-linux-gnu" define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 { ; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for( ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 23, [[TMP0]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 
23, [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP4]] +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP4]], 1 +; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP5]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD]] = load , ptr [[TMP5]], align 8 -; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vector.splice.nxv1i64( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 23, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; 
CHECK-NEXT: [[TMP6]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[VP_OP_LOAD]] = call @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv2i64( [[VECTOR_RECUR]], [[VP_OP_LOAD]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP6]]) +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP10]], ptr align 8 [[TMP11]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP6]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 -; CHECK-NEXT: [[TMP16:%.*]] = extractelement [[TMP7]], i32 [[TMP15]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 23, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ] -; 
CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L]] = load i64, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP12]], 1 +; CHECK-NEXT: [[TMP16:%.*]] = sub i64 [[TMP15]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2 +; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 0 +; CHECK-NEXT: [[TMP20:%.*]] = extractelement [[VP_OP_LOAD]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP22:%.*]] = mul nuw i32 [[TMP21]], 2 +; CHECK-NEXT: [[TMP23:%.*]] = sub i32 [[TMP22]], 1 +; CHECK-NEXT: [[TMP24:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP23]] +; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[TMP25]], i64 [[TMP24]], i64 [[TMP20]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP26]] ; entry: br label %loop @@ -81,5 +75,4 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl128b,+zvl256b" } ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll index 877484f5159fd..36ebd422b5d7b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll @@ -219,3 +219,119 @@ loop: exit: ret void } + +; Test for https://github.com/llvm/llvm-project/issues/169948. +define i8 @mixed_gather_scatters(ptr %A, ptr %B, ptr %C) #0 { +; RVA23-LABEL: @mixed_gather_scatters( +; RVA23-NEXT: entry: +; RVA23-NEXT: br label [[VECTOR_PH:%.*]] +; RVA23: vector.ph: +; RVA23-NEXT: br label [[VECTOR_BODY:%.*]] +; RVA23: vector.body: +; RVA23-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; RVA23-NEXT: [[AVL:%.*]] = phi i32 [ 10, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; RVA23-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true) +; RVA23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A:%.*]], align 8 +; RVA23-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[TMP1]], i64 0 +; RVA23-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; RVA23-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP0]]) +; RVA23-NEXT: [[TMP2:%.*]] = icmp sgt [[WIDE_MASKED_GATHER]], zeroinitializer +; RVA23-NEXT: [[TMP3:%.*]] = zext [[TMP2]] to +; RVA23-NEXT: [[TMP4:%.*]] = or [[VEC_PHI]], [[TMP3]] +; RVA23-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B:%.*]], align 8 +; RVA23-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, ptr [[TMP5]], i64 0 +; RVA23-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; RVA23-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[BROADCAST_SPLAT2]], splat (i1 true), i32 [[TMP0]]) +; 
RVA23-NEXT: [[TMP6:%.*]] = icmp sgt [[WIDE_MASKED_GATHER3]], zeroinitializer +; RVA23-NEXT: [[TMP7:%.*]] = zext [[TMP6]] to +; RVA23-NEXT: [[TMP8:%.*]] = or [[TMP4]], [[TMP7]] +; RVA23-NEXT: [[TMP9:%.*]] = or [[TMP8]], splat (i8 1) +; RVA23-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C:%.*]], align 8 +; RVA23-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement poison, ptr [[TMP10]], i64 0 +; RVA23-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector [[BROADCAST_SPLATINSERT4]], poison, zeroinitializer +; RVA23-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[BROADCAST_SPLAT5]], splat (i1 true), i32 [[TMP0]]) +; RVA23-NEXT: [[TMP11:%.*]] = icmp sgt [[WIDE_MASKED_GATHER6]], zeroinitializer +; RVA23-NEXT: [[TMP12:%.*]] = zext [[TMP11]] to +; RVA23-NEXT: [[TMP13:%.*]] = or [[TMP9]], [[TMP12]] +; RVA23-NEXT: [[TMP14]] = call @llvm.vp.merge.nxv2i8( splat (i1 true), [[TMP13]], [[VEC_PHI]], i32 [[TMP0]]) +; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP0]] +; RVA23-NEXT: [[TMP15:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; RVA23-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; RVA23: middle.block: +; RVA23-NEXT: [[TMP16:%.*]] = call i8 @llvm.vector.reduce.or.nxv2i8( [[TMP14]]) +; RVA23-NEXT: br label [[EXIT:%.*]] +; RVA23: exit: +; RVA23-NEXT: ret i8 [[TMP16]] +; +; RVA23ZVL1024B-LABEL: @mixed_gather_scatters( +; RVA23ZVL1024B-NEXT: entry: +; RVA23ZVL1024B-NEXT: br label [[VECTOR_PH:%.*]] +; RVA23ZVL1024B: vector.ph: +; RVA23ZVL1024B-NEXT: br label [[VECTOR_BODY:%.*]] +; RVA23ZVL1024B: vector.body: +; RVA23ZVL1024B-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; RVA23ZVL1024B-NEXT: [[AVL:%.*]] = phi i32 [ 10, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; RVA23ZVL1024B-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 1, i1 true) +; RVA23ZVL1024B-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A:%.*]], 
align 8 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[TMP1]], i64 0 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; RVA23ZVL1024B-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv1i64.nxv1p0( align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP0]]) +; RVA23ZVL1024B-NEXT: [[TMP2:%.*]] = icmp sgt [[WIDE_MASKED_GATHER]], zeroinitializer +; RVA23ZVL1024B-NEXT: [[TMP3:%.*]] = zext [[TMP2]] to +; RVA23ZVL1024B-NEXT: [[TMP4:%.*]] = or [[VEC_PHI]], [[TMP3]] +; RVA23ZVL1024B-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B:%.*]], align 8 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, ptr [[TMP5]], i64 0 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; RVA23ZVL1024B-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call @llvm.vp.gather.nxv1i64.nxv1p0( align 8 [[BROADCAST_SPLAT2]], splat (i1 true), i32 [[TMP0]]) +; RVA23ZVL1024B-NEXT: [[TMP6:%.*]] = icmp sgt [[WIDE_MASKED_GATHER3]], zeroinitializer +; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = zext [[TMP6]] to +; RVA23ZVL1024B-NEXT: [[TMP8:%.*]] = or [[TMP4]], [[TMP7]] +; RVA23ZVL1024B-NEXT: [[TMP9:%.*]] = or [[TMP8]], splat (i8 1) +; RVA23ZVL1024B-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C:%.*]], align 8 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement poison, ptr [[TMP10]], i64 0 +; RVA23ZVL1024B-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector [[BROADCAST_SPLATINSERT4]], poison, zeroinitializer +; RVA23ZVL1024B-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call @llvm.vp.gather.nxv1i64.nxv1p0( align 8 [[BROADCAST_SPLAT5]], splat (i1 true), i32 [[TMP0]]) +; RVA23ZVL1024B-NEXT: [[TMP11:%.*]] = icmp sgt [[WIDE_MASKED_GATHER6]], zeroinitializer +; RVA23ZVL1024B-NEXT: [[TMP12:%.*]] = zext [[TMP11]] to +; RVA23ZVL1024B-NEXT: [[TMP13:%.*]] = or [[TMP9]], [[TMP12]] +; RVA23ZVL1024B-NEXT: [[TMP14]] = call @llvm.vp.merge.nxv1i8( splat (i1 
true), [[TMP13]], [[VEC_PHI]], i32 [[TMP0]]) +; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP0]] +; RVA23ZVL1024B-NEXT: [[TMP15:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; RVA23ZVL1024B-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; RVA23ZVL1024B: middle.block: +; RVA23ZVL1024B-NEXT: [[TMP16:%.*]] = call i8 @llvm.vector.reduce.or.nxv1i8( [[TMP14]]) +; RVA23ZVL1024B-NEXT: br label [[EXIT:%.*]] +; RVA23ZVL1024B: exit: +; RVA23ZVL1024B-NEXT: ret i8 [[TMP16]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %accum = phi i8 [ 0, %entry ], [ %or.4, %loop ] + %ptr.0 = load ptr, ptr %A, align 8 + %val.0 = load i64, ptr %ptr.0, align 8 + %cmp.0 = icmp sgt i64 %val.0, 0 + %ext.0 = zext i1 %cmp.0 to i8 + %or.0 = or i8 %accum, %ext.0 + %ptr.1 = load ptr, ptr %B, align 8 + %val.1 = load i64, ptr %ptr.1, align 8 + %cmp.1 = icmp sgt i64 %val.1, 0 + %ext.1 = zext i1 %cmp.1 to i8 + %or.1 = or i8 %or.0, %ext.1 + %or.2 = or i8 %or.1, 1 + %ptr.4 = load ptr, ptr %C, align 8 + %val.4 = load i64, ptr %ptr.4, align 8 + %cmp.4 = icmp sgt i64 %val.4, 0 + %ext.4 = zext i1 %cmp.4 to i8 + %or.4 = or i8 %or.2, %ext.4 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv, 9 + br i1 %exitcond, label %exit, label %loop + +exit: + ret i8 %or.4 +} + +attributes #0 = { "target-features"="+zve64x,+zvl256b" } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll index 8d3026e63748a..1ae1ba6795c01 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll @@ -87,13 +87,13 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ 
zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -123,11 +123,11 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: 
[[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -280,13 +280,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -316,11 +316,11 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr 
i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -473,13 +473,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -509,11 +509,11 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = 
phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -665,13 +665,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: 
[[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -701,11 +701,11 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll index fa710cb8d65b1..164a5cd1ae3c0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll @@ -61,4 +61,104 @@ exit: ret void } +define i1 @scalarize_ptr_induction(ptr %start, ptr %end, ptr noalias %dst, i1 %c) #1 { +; CHECK-LABEL: 
define i1 @scalarize_ptr_induction( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]], ptr noalias [[DST:%.*]], i1 [[C:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[START5:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END4:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END4]], -12 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START5]] +; CHECK-NEXT: [[TMP2:%.*]] = udiv i64 [[TMP1]], 12 +; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 8 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[END1]], -12 +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[START2]] +; CHECK-NEXT: [[TMP8:%.*]] = udiv i64 [[TMP7]], 12 +; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 12 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8 +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP10]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP3]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[START]], [[SCEVGEP]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DST]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, ptr [[END]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ 
[[PTR_IND:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP3]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP13:%.*]] = call @llvm.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP14:%.*]] = mul [[TMP13]], splat (i64 12) +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP14]] +; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, [[VECTOR_GEP]], i64 4 +; CHECK-NEXT: [[TMP18:%.*]] = call @llvm.vp.gather.nxv2i32.nxv2p0( align 4 [[TMP12]], splat (i1 true), i32 [[TMP11]]), !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP19:%.*]] = zext [[TMP18]] to +; CHECK-NEXT: [[TMP20:%.*]] = mul [[TMP19]], splat (i64 -7070675565921424023) +; CHECK-NEXT: [[TMP21:%.*]] = add [[TMP20]], splat (i64 -4) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[TMP21]], align 1 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP11]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr nusw i8, [[VECTOR_GEP]], i64 12 +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq [[TMP16]], [[BROADCAST_SPLAT7]] +; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]] +; CHECK-NEXT: [[TMP27:%.*]] = mul i64 12, [[TMP26]] +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP27]] +; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP29:%.*]] = sub i64 [[TMP26]], 1 +; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 2 +; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 0 +; CHECK-NEXT: [[TMP25:%.*]] = extractelement [[TMP17]], i64 [[TMP29]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: 
[[SCALAR_PH]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[PTR_IV]], i64 4 +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 +; CHECK-NEXT: [[EXT:%.*]] = zext i32 [[L]] to i64 +; CHECK-NEXT: [[UNUSED:%.*]] = load i32, ptr [[PTR_IV]], align 4 +; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[EXT]], -7070675565921424023 +; CHECK-NEXT: [[MUL2:%.*]] = add i64 [[MUL1]], -4 +; CHECK-NEXT: store i64 [[MUL2]], ptr [[DST]], align 1 +; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 12 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] +; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP]], i1 true, i1 false +; CHECK-NEXT: br i1 [[OR_COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[CMP_LCSSA:%.*]] = phi i1 [ [[CMP]], %[[LOOP]] ], [ [[TMP25]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i1 [[CMP_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %ptr.iv.next, %loop ] + %gep = getelementptr i8, ptr %ptr.iv, i64 4 + %l = load i32, ptr %gep, align 4 + %ext = zext i32 %l to i64 + %unused = load i32, ptr %ptr.iv, align 4 + %mul1 = mul i64 %ext, -7070675565921424023 + %mul2 = add i64 %mul1, -4 + store i64 %mul2, ptr %dst, align 1 + %ptr.iv.next = getelementptr nusw i8, ptr %ptr.iv, i64 12 + %cmp = icmp eq ptr %ptr.iv.next, %end + %or.cond = select i1 %cmp, i1 true, i1 false + br i1 %or.cond, label %exit, label %loop + +exit: + ret i1 %cmp +} + attributes #0 = { "target-features"="+v" } +attributes #1 = { "target-cpu"="sifive-p670" } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll index 8d4d282a5236d..0723f16677090 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll @@ -9,10 +9,10 @@ define void @test(ptr %p, i64 %a, i8 %b) { ; CHECK: vector.ph: ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[B]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[A]], i64 0 +; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[A]], 48 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP0]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; CHECK-NEXT: [[TMP5:%.*]] = shl [[BROADCAST_SPLAT2]], splat (i64 48) -; CHECK-NEXT: [[TMP6:%.*]] = ashr [[TMP5]], splat (i64 52) +; CHECK-NEXT: [[TMP6:%.*]] = ashr [[BROADCAST_SPLAT2]], splat (i64 52) ; CHECK-NEXT: [[TMP7:%.*]] = trunc [[TMP6]] to ; CHECK-NEXT: [[TMP8:%.*]] = zext [[BROADCAST_SPLAT]] to ; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, ptr [[P]], i64 0 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll index 735fb769de8b9..671a929e6fa35 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll @@ -69,7 +69,7 @@ define i32 @sub(ptr %a, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP4]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] @@ -116,7 +116,7 @@ define i32 @addsub(ptr %a, ptr %b, i64 %n) 
{ ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP6]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] @@ -166,7 +166,7 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -212,7 +212,7 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -258,7 +258,7 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: 
[[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -305,7 +305,7 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -353,7 +353,7 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -400,7 +400,7 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: 
[[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -444,7 +444,7 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -483,14 +483,14 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) " ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2]] = fadd fast <16 x half> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], 
[[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]]) @@ -508,7 +508,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) " ; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[ADD_LCSSA]] @@ -545,14 +545,14 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4 ; 
CHECK-NEXT: [[TMP2]] = fadd fast <16 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]]) @@ -570,7 +570,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ ; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[ADD_LCSSA]] @@ -615,7 +615,7 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call float 
@llvm.vector.reduce.fmin.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -661,7 +661,7 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -707,7 +707,7 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -755,7 +755,7 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast float 
@llvm.vector.reduce.fmax.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -801,7 +801,7 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -847,7 +847,7 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -891,14 +891,14 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] 
= load <8 x i32>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]]) @@ -916,7 +916,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -963,7 +963,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: 
[[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -986,7 +986,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]] ; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -1036,7 +1036,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -1084,7 +1084,7 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: 
[[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -1128,18 +1128,18 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ , %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ splat (half 0xH8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x half>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x half>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]], <16 x half> [[VEC_PHI]]) ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], 
[[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]]) @@ -1159,7 +1159,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[MULADD_LCSSA]] @@ -1198,18 +1198,18 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ , %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ splat (bfloat 0xR8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>, ptr [[TMP3]], 
align 4 ; CHECK-NEXT: [[TMP4]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]], <16 x bfloat> [[VEC_PHI]]) ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]]) @@ -1229,7 +1229,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index 3c90908b0a08f..549222cd919da 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -71,7 +71,7 @@ define 
void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -115,7 +115,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP11]]) ; CHECK-NEXT: br label [[FOR_BODY:%.*]] @@ -159,7 +159,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -199,7 +199,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]] ; 
CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -224,43 +224,32 @@ for.end: define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; CHECK-LABEL: @uniform_load( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK-NEXT: br label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] -; CHECK-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 
[[IV_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 0 +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[TMP8]] ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; CHECK-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], [[FOR_BODY1]] ], [ [[V]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[V_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ 
-299,7 +288,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index a3bec999425a3..b95691f6e7c04 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -400,61 +400,49 @@ for.end: define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-LABEL: define i32 @FOR_reduction( ; IF-EVL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[TC:%.*]]) #[[ATTR0]] { -; IF-EVL-NEXT: [[ENTRY:.*]]: -; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2 -; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TC]], [[TMP1]] -; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; IF-EVL-NEXT: [[ENTRY:.*:]] +; IF-EVL-NEXT: br label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: ; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 ; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP7:%.*]] = mul 
nuw i32 [[TMP6]], 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 ; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 33, i32 [[TMP8]] ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] ; IF-EVL: [[VECTOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP4]], %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP9]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[WIDE_LOAD]] = load , ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) +; IF-EVL-NEXT: [[WIDE_LOAD]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[ARRAYIDX]], splat (i1 true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP9]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw [[TMP10]], [[WIDE_LOAD]] ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store [[TMP11]], ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] -; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP11]], ptr align 4 [[TMP12]], splat (i1 
true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDVARS]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: +; IF-EVL-NEXT: [[TMP28:%.*]] = sub i64 [[TMP13]], 1 +; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[TMP28]], 1 +; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; IF-EVL-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; IF-EVL-NEXT: [[TMP21:%.*]] = extractelement [[WIDE_LOAD]], i64 [[TMP17]] ; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 ; IF-EVL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP16]] -; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4 -; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 2 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP19]] -; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; IF-EVL: [[SCALAR_PH]]: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 33, %[[ENTRY]] ] -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: 
[[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP0]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: [[TMP25:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP16]] +; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[TMP28]], 0 +; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = select i1 [[TMP26]], i32 [[TMP25]], i32 [[TMP21]] +; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: -; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]] ; ; NO-VP-LABEL: define i32 @FOR_reduction( @@ -570,7 +558,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: @@ -662,8 +650,7 @@ for.end: ; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} -; 
IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} ;. ; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index b9a4e97cd9f24..cc1b2380bc532 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -108,7 +108,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]]) @@ -117,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -134,7 +134,7 
@@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -152,7 +152,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]]) @@ -219,7 +219,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -303,7 +303,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) 
{ ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -387,7 +387,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -471,7 +471,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -557,7 +557,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -643,7 +643,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -729,7 +729,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -815,7 +815,7 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -895,7 +895,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; 
IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]]) @@ -904,7 +904,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -921,7 +921,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret 
float [[MUL_LCSSA]] @@ -939,7 +939,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]]) @@ -1007,7 +1007,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1095,7 +1095,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1179,14 
+1179,14 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1204,7 +1204,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], 
!llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1224,7 +1224,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1287,14 +1287,14 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: 
[[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1312,7 +1312,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1332,7 +1332,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6]] = call <8 x 
float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1401,7 +1401,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1492,7 +1492,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] @@ -1584,7 +1584,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] diff --git 
a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index 7179e7dc48c8d..d1a2303e35e68 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -110,14 +110,14 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] ; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) @@ -135,7 +135,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], 
!llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -154,7 +154,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]] @@ -221,7 +221,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -308,7 +308,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -395,7 +395,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -484,7 +484,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -577,7 +577,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -670,7 +670,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -763,7 +763,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -854,7 +854,7 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: 
middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -937,14 +937,14 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ splat (float 1.000000e+00), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] ; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) @@ -962,7 +962,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -981,7 +981,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ splat (float 1.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_LOAD]], [[VEC_PHI]] @@ -1050,7 +1050,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1143,7 +1143,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1230,14 +1230,14 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1255,7 +1255,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float 
@llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1275,7 +1275,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; NO-VP-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1338,14 +1338,14 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = 
load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1363,7 +1363,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1383,7 +1383,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr 
[[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; NO-VP-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1452,7 +1452,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP17]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1544,7 +1544,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] @@ -1636,7 +1636,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 
[[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index 7b0ac78fb365c..13990000585ea 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -331,20 +331,20 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] ; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[INDEX]] ; NO-VP-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0 -; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 -15 +; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 0 +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 -15 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; NO-VP-NEXT: [[REVERSE:%.*]] = shufflevector <16 x i8> [[WIDE_LOAD]], <16 x i8> poison, <16 x i32> ; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B:%.*]], <16 x i8> [[REVERSE]] ; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> align 1 [[TMP3]], <16 x i1> splat (i1 true), <16 x i8> poison) ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr 
[[TMP4]], i32 0 -; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 -15 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 0 +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 -15 ; NO-VP-NEXT: [[REVERSE1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_GATHER]], <16 x i8> poison, <16 x i32> ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP6]], align 1 ; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 0 -; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i32 -15 +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 0 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 -15 ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP9]], align 1 ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 0375f0a8fd132..55e7018c49eec 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -44,7 +44,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 
8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -109,44 +109,33 @@ for.end: define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: [[ENTRY:.*:]] +; SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 8 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP6]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[INDEX]], [[TMP3]] -; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], splat (i1 true), i32 [[TMP0]]) +; SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[TMP5]], 1 +; SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP7]], 2 +; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 [[TMP11]], 0 +; SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: -; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ 
[[TMP6]], %[[MIDDLE_BLOCK]] ] -; SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; SCALABLE-NEXT: ret i64 [[TMP12]] ; ; FIXEDLEN-LABEL: define i64 @uniform_load_outside_use( ; FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { @@ -160,7 +149,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -184,44 +173,33 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; ; TF-SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; TF-SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-SCALABLE-NEXT: [[ENTRY:.*]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; TF-SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; TF-SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; TF-SCALABLE-NEXT: [[ENTRY:.*:]] +; TF-SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, 
[[TMP3]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; TF-SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: -; TF-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; TF-SCALABLE: 
[[SCALAR_PH]]: -; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; TF-SCALABLE-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TF-SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[TMP5]], 1 +; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 0 +; TF-SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: -; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], %[[FOR_BODY]] ], [ [[V]], %[[MIDDLE_BLOCK]] ] -; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; TF-SCALABLE-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ -269,7 +247,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label 
%[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -294,7 +272,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; FIXEDLEN-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP1]], <4 x i64> [[WIDE_MASKED_GATHER]], <4 x i64> zeroinitializer ; FIXEDLEN-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> [[WIDE_MASKED_GATHER1]], <4 x i64> zeroinitializer ; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 4 +; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -350,7 +328,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -399,7 +377,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; 
SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -417,7 +395,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -457,7 +435,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -499,7 +477,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -517,7 +495,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 8 ; FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 +; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -557,7 +535,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -608,7 +586,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], 
label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -630,7 +608,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; FIXEDLEN-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 7 ; FIXEDLEN-NEXT: store i64 [[TMP4]], ptr [[B]], align 8 ; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 4 +; FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -679,7 +657,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -731,7 +709,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] 
; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -756,7 +734,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> align 8 [[BROADCAST_SPLAT2]], <4 x i1> [[TMP1]]) ; FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> align 8 [[BROADCAST_SPLAT2]], <4 x i1> [[TMP2]]) ; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 4 +; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -812,7 +790,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -860,7 +838,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP10:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -878,7 +856,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 1 ; FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 +; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -918,7 +896,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll index 0287645d9d7f9..94ebf01509ec2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll @@ -126,9 +126,9 @@ define void @conversion_cost2(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwi ; 
CHECK-NEXT: [[TMP19:%.*]] = sitofp <2 x i64> [[TMP10]] to <2 x float> ; CHECK-NEXT: [[TMP20:%.*]] = sitofp <2 x i64> [[TMP11]] to <2 x float> ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 2 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 4 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 6 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 2 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 6 ; CHECK-NEXT: store <2 x float> [[TMP12]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP18]], ptr [[TMP15]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP19]], ptr [[TMP16]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll index 6ec010cdcc248..651e2ad5e74da 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll @@ -686,16 +686,256 @@ exit: ; Test for https://github.com/llvm/llvm-project/issues/129236. 
define i32 @cost_ashr_with_op_known_invariant_via_scev(i8 %a) { ; CHECK-LABEL: @cost_ashr_with_op_known_invariant_via_scev( -; CHECK-NEXT: entry: +; CHECK-NEXT: iter.check: ; CHECK-NEXT: [[CMP_I:%.*]] = icmp eq i16 0, 0 ; CHECK-NEXT: [[CONV_I:%.*]] = sext i16 0 to i32 ; CHECK-NEXT: [[CONV5_I:%.*]] = sext i8 [[A:%.*]] to i32 +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <32 x i1> poison, i1 [[CMP_I]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <32 x i1> [[BROADCAST_SPLATINSERT]], <32 x i1> poison, <32 x i32> zeroinitializer +; CHECK-NEXT: [[TMP60:%.*]] = xor <32 x i1> [[BROADCAST_SPLAT]], splat (i1 true) ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UREM_CONTINUE62:%.*]] ] +; CHECK-NEXT: [[TMP61:%.*]] = extractelement <32 x i1> [[TMP60]], i32 0 +; CHECK-NEXT: br i1 [[TMP61]], label [[PRED_UREM_IF:%.*]], label [[PRED_UREM_CONTINUE:%.*]] +; CHECK: pred.urem.if: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE]] +; CHECK: pred.urem.continue: +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <32 x i1> [[TMP60]], i32 1 +; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_UREM_IF1:%.*]], label [[PRED_UREM_CONTINUE2:%.*]] +; CHECK: pred.urem.if1: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE2]] +; CHECK: pred.urem.continue2: +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <32 x i1> [[TMP60]], i32 2 +; CHECK-NEXT: br i1 [[TMP3]], label [[PRED_UREM_IF3:%.*]], label [[PRED_UREM_CONTINUE4:%.*]] +; CHECK: pred.urem.if3: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE4]] +; CHECK: pred.urem.continue4: +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <32 x i1> [[TMP60]], i32 3 +; CHECK-NEXT: br i1 [[TMP4]], label 
[[PRED_UREM_IF5:%.*]], label [[PRED_UREM_CONTINUE6:%.*]] +; CHECK: pred.urem.if5: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE6]] +; CHECK: pred.urem.continue6: +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <32 x i1> [[TMP60]], i32 4 +; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_UREM_IF7:%.*]], label [[PRED_UREM_CONTINUE8:%.*]] +; CHECK: pred.urem.if7: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE8]] +; CHECK: pred.urem.continue8: +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <32 x i1> [[TMP60]], i32 5 +; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_UREM_IF9:%.*]], label [[PRED_UREM_CONTINUE10:%.*]] +; CHECK: pred.urem.if9: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE10]] +; CHECK: pred.urem.continue10: +; CHECK-NEXT: [[TMP7:%.*]] = extractelement <32 x i1> [[TMP60]], i32 6 +; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_UREM_IF11:%.*]], label [[PRED_UREM_CONTINUE12:%.*]] +; CHECK: pred.urem.if11: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE12]] +; CHECK: pred.urem.continue12: +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <32 x i1> [[TMP60]], i32 7 +; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_UREM_IF13:%.*]], label [[PRED_UREM_CONTINUE14:%.*]] +; CHECK: pred.urem.if13: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE14]] +; CHECK: pred.urem.continue14: +; CHECK-NEXT: [[TMP9:%.*]] = extractelement <32 x i1> [[TMP60]], i32 8 +; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_UREM_IF15:%.*]], label [[PRED_UREM_CONTINUE16:%.*]] +; CHECK: pred.urem.if15: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE16]] +; CHECK: pred.urem.continue16: +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <32 x i1> [[TMP60]], i32 9 +; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_UREM_IF17:%.*]], label [[PRED_UREM_CONTINUE18:%.*]] +; CHECK: pred.urem.if17: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE18]] +; CHECK: pred.urem.continue18: +; CHECK-NEXT: [[TMP11:%.*]] = extractelement <32 x i1> [[TMP60]], i32 10 +; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_UREM_IF19:%.*]], label [[PRED_UREM_CONTINUE20:%.*]] +; CHECK: 
pred.urem.if19: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE20]] +; CHECK: pred.urem.continue20: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <32 x i1> [[TMP60]], i32 11 +; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_UREM_IF21:%.*]], label [[PRED_UREM_CONTINUE22:%.*]] +; CHECK: pred.urem.if21: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE22]] +; CHECK: pred.urem.continue22: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement <32 x i1> [[TMP60]], i32 12 +; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_UREM_IF23:%.*]], label [[PRED_UREM_CONTINUE24:%.*]] +; CHECK: pred.urem.if23: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE24]] +; CHECK: pred.urem.continue24: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <32 x i1> [[TMP60]], i32 13 +; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_UREM_IF25:%.*]], label [[PRED_UREM_CONTINUE26:%.*]] +; CHECK: pred.urem.if25: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE26]] +; CHECK: pred.urem.continue26: +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <32 x i1> [[TMP60]], i32 14 +; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_UREM_IF27:%.*]], label [[PRED_UREM_CONTINUE28:%.*]] +; CHECK: pred.urem.if27: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE28]] +; CHECK: pred.urem.continue28: +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <32 x i1> [[TMP60]], i32 15 +; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_UREM_IF29:%.*]], label [[PRED_UREM_CONTINUE30:%.*]] +; CHECK: pred.urem.if29: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE30]] +; CHECK: pred.urem.continue30: +; CHECK-NEXT: [[TMP17:%.*]] = extractelement <32 x i1> [[TMP60]], i32 16 +; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_UREM_IF31:%.*]], label [[PRED_UREM_CONTINUE32:%.*]] +; CHECK: pred.urem.if31: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE32]] +; CHECK: pred.urem.continue32: +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <32 x i1> [[TMP60]], i32 17 +; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_UREM_IF33:%.*]], label [[PRED_UREM_CONTINUE34:%.*]] +; CHECK: pred.urem.if33: +; CHECK-NEXT: br label 
[[PRED_UREM_CONTINUE34]] +; CHECK: pred.urem.continue34: +; CHECK-NEXT: [[TMP19:%.*]] = extractelement <32 x i1> [[TMP60]], i32 18 +; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_UREM_IF35:%.*]], label [[PRED_UREM_CONTINUE36:%.*]] +; CHECK: pred.urem.if35: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE36]] +; CHECK: pred.urem.continue36: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <32 x i1> [[TMP60]], i32 19 +; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_UREM_IF37:%.*]], label [[PRED_UREM_CONTINUE38:%.*]] +; CHECK: pred.urem.if37: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE38]] +; CHECK: pred.urem.continue38: +; CHECK-NEXT: [[TMP21:%.*]] = extractelement <32 x i1> [[TMP60]], i32 20 +; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_UREM_IF39:%.*]], label [[PRED_UREM_CONTINUE40:%.*]] +; CHECK: pred.urem.if39: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE40]] +; CHECK: pred.urem.continue40: +; CHECK-NEXT: [[TMP22:%.*]] = extractelement <32 x i1> [[TMP60]], i32 21 +; CHECK-NEXT: br i1 [[TMP22]], label [[PRED_UREM_IF41:%.*]], label [[PRED_UREM_CONTINUE42:%.*]] +; CHECK: pred.urem.if41: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE42]] +; CHECK: pred.urem.continue42: +; CHECK-NEXT: [[TMP23:%.*]] = extractelement <32 x i1> [[TMP60]], i32 22 +; CHECK-NEXT: br i1 [[TMP23]], label [[PRED_UREM_IF43:%.*]], label [[PRED_UREM_CONTINUE44:%.*]] +; CHECK: pred.urem.if43: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE44]] +; CHECK: pred.urem.continue44: +; CHECK-NEXT: [[TMP24:%.*]] = extractelement <32 x i1> [[TMP60]], i32 23 +; CHECK-NEXT: br i1 [[TMP24]], label [[PRED_UREM_IF45:%.*]], label [[PRED_UREM_CONTINUE46:%.*]] +; CHECK: pred.urem.if45: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE46]] +; CHECK: pred.urem.continue46: +; CHECK-NEXT: [[TMP25:%.*]] = extractelement <32 x i1> [[TMP60]], i32 24 +; CHECK-NEXT: br i1 [[TMP25]], label [[PRED_UREM_IF47:%.*]], label [[PRED_UREM_CONTINUE48:%.*]] +; CHECK: pred.urem.if47: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE48]] +; CHECK: 
pred.urem.continue48: +; CHECK-NEXT: [[TMP26:%.*]] = extractelement <32 x i1> [[TMP60]], i32 25 +; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_UREM_IF49:%.*]], label [[PRED_UREM_CONTINUE50:%.*]] +; CHECK: pred.urem.if49: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE50]] +; CHECK: pred.urem.continue50: +; CHECK-NEXT: [[TMP27:%.*]] = extractelement <32 x i1> [[TMP60]], i32 26 +; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_UREM_IF51:%.*]], label [[PRED_UREM_CONTINUE52:%.*]] +; CHECK: pred.urem.if51: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE52]] +; CHECK: pred.urem.continue52: +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <32 x i1> [[TMP60]], i32 27 +; CHECK-NEXT: br i1 [[TMP28]], label [[PRED_UREM_IF53:%.*]], label [[PRED_UREM_CONTINUE54:%.*]] +; CHECK: pred.urem.if53: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE54]] +; CHECK: pred.urem.continue54: +; CHECK-NEXT: [[TMP29:%.*]] = extractelement <32 x i1> [[TMP60]], i32 28 +; CHECK-NEXT: br i1 [[TMP29]], label [[PRED_UREM_IF55:%.*]], label [[PRED_UREM_CONTINUE56:%.*]] +; CHECK: pred.urem.if55: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE56]] +; CHECK: pred.urem.continue56: +; CHECK-NEXT: [[TMP30:%.*]] = extractelement <32 x i1> [[TMP60]], i32 29 +; CHECK-NEXT: br i1 [[TMP30]], label [[PRED_UREM_IF57:%.*]], label [[PRED_UREM_CONTINUE58:%.*]] +; CHECK: pred.urem.if57: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE58]] +; CHECK: pred.urem.continue58: +; CHECK-NEXT: [[TMP31:%.*]] = extractelement <32 x i1> [[TMP60]], i32 30 +; CHECK-NEXT: br i1 [[TMP31]], label [[PRED_UREM_IF59:%.*]], label [[PRED_UREM_CONTINUE60:%.*]] +; CHECK: pred.urem.if59: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE60]] +; CHECK: pred.urem.continue60: +; CHECK-NEXT: [[TMP32:%.*]] = extractelement <32 x i1> [[TMP60]], i32 31 +; CHECK-NEXT: br i1 [[TMP32]], label [[PRED_UREM_IF61:%.*]], label [[PRED_UREM_CONTINUE62]] +; CHECK: pred.urem.if61: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE62]] +; CHECK: pred.urem.continue62: +; CHECK-NEXT: [[TMP33:%.*]] = 
select <32 x i1> [[TMP60]], <32 x i1> poison, <32 x i1> zeroinitializer +; CHECK-NEXT: [[TMP34:%.*]] = or <32 x i1> [[TMP33]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[CMP_I]], <32 x i32> zeroinitializer, <32 x i32> poison +; CHECK-NEXT: [[TMP35:%.*]] = extractelement <32 x i32> [[PREDPHI]], i32 0 +; CHECK-NEXT: [[TMP36:%.*]] = ashr i32 [[CONV5_I]], [[TMP35]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT63:%.*]] = insertelement <32 x i32> poison, i32 [[TMP36]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT64:%.*]] = shufflevector <32 x i32> [[BROADCAST_SPLATINSERT63]], <32 x i32> poison, <32 x i32> zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = icmp eq <32 x i32> [[BROADCAST_SPLAT64]], zeroinitializer +; CHECK-NEXT: [[TMP38:%.*]] = shl <32 x i32> [[PREDPHI]], splat (i32 24) +; CHECK-NEXT: [[TMP39:%.*]] = ashr exact <32 x i32> [[TMP38]], splat (i32 24) +; CHECK-NEXT: [[TMP40:%.*]] = extractelement <32 x i1> [[TMP37]], i32 0 +; CHECK-NEXT: [[TMP41:%.*]] = select i1 [[TMP40]], <32 x i32> [[TMP39]], <32 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI65:%.*]] = select <32 x i1> [[TMP34]], <32 x i32> [[TMP41]], <32 x i32> zeroinitializer +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32 +; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i32 [[INDEX_NEXT]], 96 +; CHECK-NEXT: br i1 [[TMP42]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP_HEADER]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[TMP43:%.*]] = extractelement <32 x i32> [[PREDPHI65]], i32 31 +; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK: vec.epilog.iter.check: +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11:![0-9]+]] +; CHECK: vec.epilog.ph: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ 96, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT66:%.*]] = insertelement <4 x i1> poison, i1 [[CMP_I]], i64 0 +; CHECK-NEXT: 
[[BROADCAST_SPLAT67:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT66]], <4 x i1> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP44:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT67]], splat (i1 true) +; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; CHECK: vec.epilog.vector.body: +; CHECK-NEXT: [[INDEX68:%.*]] = phi i32 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT81:%.*]], [[PRED_UREM_CONTINUE76:%.*]] ] +; CHECK-NEXT: [[TMP45:%.*]] = extractelement <4 x i1> [[TMP44]], i32 0 +; CHECK-NEXT: br i1 [[TMP45]], label [[PRED_UREM_IF69:%.*]], label [[PRED_UREM_CONTINUE70:%.*]] +; CHECK: pred.urem.if69: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE70]] +; CHECK: pred.urem.continue70: +; CHECK-NEXT: [[TMP46:%.*]] = extractelement <4 x i1> [[TMP44]], i32 1 +; CHECK-NEXT: br i1 [[TMP46]], label [[PRED_UREM_IF71:%.*]], label [[PRED_UREM_CONTINUE72:%.*]] +; CHECK: pred.urem.if71: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE72]] +; CHECK: pred.urem.continue72: +; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i1> [[TMP44]], i32 2 +; CHECK-NEXT: br i1 [[TMP47]], label [[PRED_UREM_IF73:%.*]], label [[PRED_UREM_CONTINUE74:%.*]] +; CHECK: pred.urem.if73: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE74]] +; CHECK: pred.urem.continue74: +; CHECK-NEXT: [[TMP48:%.*]] = extractelement <4 x i1> [[TMP44]], i32 3 +; CHECK-NEXT: br i1 [[TMP48]], label [[PRED_UREM_IF75:%.*]], label [[PRED_UREM_CONTINUE76]] +; CHECK: pred.urem.if75: +; CHECK-NEXT: br label [[PRED_UREM_CONTINUE76]] +; CHECK: pred.urem.continue76: +; CHECK-NEXT: [[TMP49:%.*]] = select <4 x i1> [[TMP44]], <4 x i1> poison, <4 x i1> zeroinitializer +; CHECK-NEXT: [[TMP50:%.*]] = or <4 x i1> [[TMP49]], [[BROADCAST_SPLAT67]] +; CHECK-NEXT: [[PREDPHI77:%.*]] = select i1 [[CMP_I]], <4 x i32> zeroinitializer, <4 x i32> poison +; CHECK-NEXT: [[TMP51:%.*]] = extractelement <4 x i32> [[PREDPHI77]], i32 0 +; CHECK-NEXT: [[TMP52:%.*]] = ashr i32 [[CONV5_I]], [[TMP51]] +; CHECK-NEXT: 
[[BROADCAST_SPLATINSERT78:%.*]] = insertelement <4 x i32> poison, i32 [[TMP52]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT79:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT78]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP53:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT79]], zeroinitializer +; CHECK-NEXT: [[TMP54:%.*]] = shl <4 x i32> [[PREDPHI77]], splat (i32 24) +; CHECK-NEXT: [[TMP55:%.*]] = ashr exact <4 x i32> [[TMP54]], splat (i32 24) +; CHECK-NEXT: [[TMP56:%.*]] = extractelement <4 x i1> [[TMP53]], i32 0 +; CHECK-NEXT: [[TMP57:%.*]] = select i1 [[TMP56]], <4 x i32> [[TMP55]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI80:%.*]] = select <4 x i1> [[TMP50]], <4 x i32> [[TMP57]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDEX_NEXT81]] = add nuw i32 [[INDEX68]], 4 +; CHECK-NEXT: [[TMP58:%.*]] = icmp eq i32 [[INDEX_NEXT81]], 100 +; CHECK-NEXT: br i1 [[TMP58]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK: vec.epilog.middle.block: +; CHECK-NEXT: [[TMP59:%.*]] = extractelement <4 x i32> [[PREDPHI80]], i32 3 +; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK: vec.epilog.scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 4, [[VEC_EPILOG_ITER_CHECK]] ], [ 100, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br label [[LOOP_HEADER1:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 100, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] ; CHECK-NEXT: br i1 [[CMP_I]], label [[THEN:%.*]], label [[ELSE:%.*]] ; CHECK: then: -; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[REM_I:%.*]], [[ELSE]] ], [ 0, [[LOOP_HEADER]] ] +; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[REM_I:%.*]], [[ELSE]] ], [ 0, [[LOOP_HEADER1]] ] ; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[CONV5_I]], [[P_1]] ; CHECK-NEXT: 
[[TOBOOL6_NOT_I:%.*]] = icmp eq i32 [[SHR_I]], 0 ; CHECK-NEXT: [[SEXT_I:%.*]] = shl i32 [[P_1]], 24 @@ -710,9 +950,9 @@ define i32 @cost_ashr_with_op_known_invariant_via_scev(i8 %a) { ; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ 0, [[ELSE]] ], [ [[TMP1]], [[THEN]] ] ; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], -1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP_HEADER]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER1]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: exit: -; CHECK-NEXT: [[P_2_LCSSA:%.*]] = phi i32 [ [[P_2]], [[LOOP_LATCH]] ] +; CHECK-NEXT: [[P_2_LCSSA:%.*]] = phi i32 [ [[P_2]], [[LOOP_LATCH]] ], [ [[TMP43]], [[MIDDLE_BLOCK]] ], [ [[TMP59]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[P_2_LCSSA]] ; entry: @@ -838,7 +1078,7 @@ define void @sdiv_by_zero(ptr noalias %src, ptr noalias %dst, i32 %d) #2 { ; CHECK-NEXT: store <8 x i32> [[PREDPHI]], ptr [[TMP42]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 -; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -858,7 +1098,7 @@ define void @sdiv_by_zero(ptr noalias %src, ptr noalias %dst, i32 %d) #2 { ; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_DST]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[IV]], 16 -; CHECK-NEXT: br i1 [[EC]], label [[LOOP_HEADER]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[LOOP_HEADER]], label [[EXIT:%.*]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1197,12 +1437,12 @@ define i64 @test_predicated_udiv(i32 %d, i1 %c) #2 { ; CHECK-NEXT: [[INDEX_NEXT]] 
= add nuw i32 [[INDEX]], 32 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <32 x i32> [[VEC_IND]], splat (i32 32) ; CHECK-NEXT: [[TMP163:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992 -; CHECK-NEXT: br i1 [[TMP163]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP163]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP164:%.*]] = extractelement <32 x i64> [[PREDPHI]], i32 31 ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF13:![0-9]+]] +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF17:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT63:%.*]] = insertelement <8 x i1> poison, i1 [[C]], i64 0 @@ -1293,7 +1533,7 @@ define i64 @test_predicated_udiv(i32 %d, i1 %c) #2 { ; CHECK-NEXT: [[INDEX_NEXT86]] = add nuw i32 [[INDEX67]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT87]] = add <8 x i32> [[VEC_IND68]], splat (i32 8) ; CHECK-NEXT: [[TMP208:%.*]] = icmp eq i32 [[INDEX_NEXT86]], 1000 -; CHECK-NEXT: br i1 [[TMP208]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP208]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP209:%.*]] = extractelement <8 x i64> [[PREDPHI85]], i32 7 ; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -1312,7 +1552,7 @@ define i64 @test_predicated_udiv(i32 %d, i1 %c) #2 { ; CHECK-NEXT: [[MERGE:%.*]] = phi i64 [ [[ZEXT]], [[THEN]] ], [ 0, [[LOOP_HEADER]] ] ; CHECK-NEXT: [[IV_NEXT]] = add 
i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i64 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP164]], [[MIDDLE_BLOCK]] ], [ [[TMP209]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[MERGE_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index b3c45a565a8fe..801f910c5e13d 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -376,16 +376,16 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: [[TMP20:%.*]] = select i1 [[TMP19]], i64 4, i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP20]] ; CHECK-NEXT: [[TMP21:%.*]] = load i64, ptr [[SRC_2]], align 8, !alias.scope [[META6:![0-9]+]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP21]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP22:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <2 x i64> [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP31:%.*]] = icmp ne i64 [[TMP21]], 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[TMP31]], i64 0 +; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr 
inbounds i64, ptr [[SRC_3]], i32 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i32 2 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP25]], align 8, !alias.scope [[META9:![0-9]+]] ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq <2 x i64> [[WIDE_LOAD]], zeroinitializer ; CHECK-NEXT: [[TMP27:%.*]] = and <2 x i1> [[TMP23]], [[TMP26]] @@ -739,10 +739,10 @@ define i64 @cost_loop_invariant_recipes(i1 %x, i64 %y) { ; CHECK: vector.ph: ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[Y:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[X:%.*]], i64 0 +; CHECK-NEXT: [[X:%.*]] = xor i1 [[X1:%.*]], true +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[X]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) -; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> [[TMP0]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> [[BROADCAST_SPLAT]] to <2 x i64> ; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[BROADCAST_SPLAT2]], [[TMP1]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll index 3165422dcc539..d19ae728cc913 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll @@ -23,8 +23,8 @@ define i1 @fn(ptr %nno) #0 
{ ; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i64> [[VEC_IND]], splat (i64 1) ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP23]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 -3 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP23]], i64 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i64 -3 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP6]], <4 x i1> [[REVERSE]], <4 x i32> poison) ; CHECK-NEXT: [[REVERSE1:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> poison, <4 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll index 6e940ee58fabe..a1b92e0658bd3 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll @@ -161,9 +161,9 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[K:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 16 -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 32 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 48 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 16 +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 32 +; CHECK-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds i16, ptr [[TMP8]], i64 48 ; CHECK-NEXT: store <16 x i16> [[TMP4]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP10]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP21]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll index 12b8d1e15b523..84579d97b38e2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll @@ -26,7 +26,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -34,7 +34,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP9:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[TMP7]] ; CHECK-NEXT: [[TMP10:%.*]] = add <16 x i8> [[WIDE_LOAD1]], [[TMP8]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP9]], ptr [[TMP11]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP14]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw 
i64 [[INDEX]], 32 @@ -119,7 +119,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR4:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT3]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -135,7 +135,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP17:%.*]] = add <16 x i8> [[TMP15]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP18:%.*]] = add <16 x i8> [[TMP16]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i32 16 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP17]], ptr [[TMP19]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP18]], ptr [[TMP22]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll index 39217e51ab117..41249c595f9eb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll @@ -41,9 +41,9 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd fast <8 x float> [[STEP_ADD]], splat (float 4.000000e+00) ; AUTO_VEC-NEXT: 
[[STEP_ADD3:%.*]] = fadd fast <8 x float> [[STEP_ADD2]], splat (float 4.000000e+00) ; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 16 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 24 +; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 16 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 24 ; AUTO_VEC-NEXT: store <8 x float> [[VEC_IND]], ptr [[TMP1]], align 4 ; AUTO_VEC-NEXT: store <8 x float> [[STEP_ADD]], ptr [[TMP2]], align 4 ; AUTO_VEC-NEXT: store <8 x float> [[STEP_ADD2]], ptr [[TMP3]], align 4 @@ -208,9 +208,9 @@ define double @external_use_with_fast_math(ptr %a, i64 %n) { ; AUTO_VEC-NEXT: [[STEP_ADD_2:%.*]] = fadd fast <4 x double> [[STEP_ADD]], splat (double 1.200000e+01) ; AUTO_VEC-NEXT: [[STEP_ADD_3:%.*]] = fadd fast <4 x double> [[STEP_ADD_2]], splat (double 1.200000e+01) ; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 4 -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[TMP1]], i32 8 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP1]], i32 12 +; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i64 4 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 8 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP1]], i64 12 ; AUTO_VEC-NEXT: store <4 x double> [[VEC_IND]], ptr [[TMP1]], align 8 ; AUTO_VEC-NEXT: store <4 x double> [[STEP_ADD]], ptr [[TMP2]], align 8 ; AUTO_VEC-NEXT: store <4 x double> [[STEP_ADD_2]], ptr [[TMP3]], align 8 @@ -326,9 +326,9 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) 
{ ; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd reassoc <8 x float> [[STEP_ADD]], splat (float 3.360000e+02) ; AUTO_VEC-NEXT: [[STEP_ADD3:%.*]] = fadd reassoc <8 x float> [[STEP_ADD2]], splat (float 3.360000e+02) ; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 16 -; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 24 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 16 +; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 24 ; AUTO_VEC-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; AUTO_VEC-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; AUTO_VEC-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll index a0637ceb53cf2..137c09b653f2c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll @@ -22,17 +22,17 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -99,17 +99,17 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = 
getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -176,17 +176,17 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr 
[[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -253,17 +253,17 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = 
call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index 877fcd4d638eb..34a99b07ee93e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -75,7 +75,7 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[VEC_PHI1:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[INDEX]] -; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i32 2 +; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i64 2 ; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; SSE-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; SSE-NEXT: [[TMP6:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], splat (double 4.200000e+01) @@ -106,9 +106,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[VEC_PHI2:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI8:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI9:%.*]], 
[[VECTOR_BODY]] ] ; AVX-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[INDEX]] -; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i32 4 -; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i32 8 -; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i32 12 +; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i64 4 +; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i64 8 +; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i64 12 ; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP4]], align 8 ; AVX-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x double>, ptr [[TMP9]], align 8 ; AVX-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x double>, ptr [[TMP10]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll index 4028dd87e34b3..04bff3c393f62 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll @@ -125,13 +125,13 @@ define void @multiple_truncated_ivs_with_wide_uses(i1 %c, ptr %A, ptr %B) { ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C]], <4 x i16> [[VEC_IND]], <4 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C]], <4 x i16> [[STEP_ADD]], <4 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP1]], ptr [[TMP4]], align 2, !alias.scope [[META6:![0-9]+]], !noalias [[META9:![0-9]+]] -; CHECK-NEXT: store <4 x i16> [[TMP2]], ptr [[TMP7]], align 2, !alias.scope [[META6]], !noalias [[META9]] +; CHECK-NEXT: store <4 x i16> [[TMP2]], ptr [[TMP3]], align 2, !alias.scope [[META6]], !noalias [[META9]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr 
i32, ptr [[TMP8]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP8]], i64 4 ; CHECK-NEXT: store <4 x i32> [[VEC_IND3]], ptr [[TMP8]], align 4, !alias.scope [[META9]] -; CHECK-NEXT: store <4 x i32> [[STEP_ADD4]], ptr [[TMP11]], align 4, !alias.scope [[META9]] +; CHECK-NEXT: store <4 x i32> [[STEP_ADD4]], ptr [[TMP5]], align 4, !alias.scope [[META9]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[STEP_ADD]], splat (i16 4) ; CHECK-NEXT: [[VEC_IND_NEXT6]] = add <4 x i32> [[STEP_ADD4]], splat (i32 4) @@ -192,7 +192,7 @@ define void @truncated_ivs_with_wide_and_scalar_uses(i1 %c, ptr %dst) { ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[DST]], i32 [[TMP0]] ; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[C]], <8 x i16> [[VEC_IND]], <8 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[STEP_ADD]], <8 x i16> splat (i16 10) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[TMP3]], i32 8 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[TMP3]], i64 8 ; CHECK-NEXT: store <8 x i16> [[TMP5]], ptr [[TMP3]], align 2 ; CHECK-NEXT: store <8 x i16> [[TMP6]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -477,9 +477,9 @@ define i32 @test_scalar_predicated_cost(i64 %x, i64 %y, ptr %A) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = trunc <8 x i64> [[TMP13]] to <8 x i32> ; CHECK-NEXT: [[TMP22:%.*]] = trunc <8 x i64> [[TMP14]] to <8 x i32> ; CHECK-NEXT: [[TMP23:%.*]] = trunc <8 x i64> [[TMP15]] to <8 x i32> -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i32 8 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i32 16 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i32 24 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i64 8 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i64 16 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i64 24 ; CHECK-NEXT: call 
void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP20]], ptr align 4 [[TMP16]], <8 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP9]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP22]], ptr align 4 [[TMP26]], <8 x i1> [[TMP10]]) diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll index 61f07eff768c1..d25d9f81de985 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll @@ -27,7 +27,7 @@ define i16 @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[TMP4:%.*]] = add <4 x i16> [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP4]], ptr [[TMP5]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -99,7 +99,7 @@ define i16 @wide_sub_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr [[TMP6]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP10]], ptr [[TMP9]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git 
a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll index d75fd0e0023f7..ad6dfb054b726 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll @@ -32,9 +32,9 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[OFFSET_IDX2]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 32 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 48 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 32 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i16>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i16>, ptr [[TMP2]], align 2 @@ -43,9 +43,9 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD4]], <16 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD5]], <16 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD6]], <16 x i16> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 48 +; 
CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 48 ; CHECK-NEXT: store <16 x i16> [[TMP4]], ptr [[NEXT_GEP3]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP9]], align 2 @@ -160,9 +160,9 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 96 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 64 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 96 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <32 x i8>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <32 x i8>, ptr [[TMP2]], align 2 @@ -171,9 +171,9 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD3]], <32 x i8> [[WIDE_LOAD3]], <32 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP6:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD4]], <32 x i8> [[WIDE_LOAD4]], <32 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP7:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD5]], <32 x i8> [[WIDE_LOAD5]], <32 x i8> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: 
[[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 32 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 64 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 96 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 32 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 64 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 96 ; CHECK-NEXT: store <32 x i8> [[TMP4]], ptr [[NEXT_GEP2]], align 2 ; CHECK-NEXT: store <32 x i8> [[TMP5]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <32 x i8> [[TMP6]], ptr [[TMP9]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll index b710236c026d2..751e885733f17 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll @@ -38,9 +38,9 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b) ; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 32 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 48 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 32 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: 
[[WIDE_LOAD7:%.*]] = load <16 x i32>, ptr [[TMP2]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i32>, ptr [[TMP3]], align 8, !alias.scope [[META0]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll b/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll index bcb6b5c422343..a247285317a1e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll @@ -18,9 +18,9 @@ define i64 @test_pr98660(ptr %dst, i64 %N) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP9]], i32 8 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP9]], i32 16 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 24 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP9]], i64 8 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP9]], i64 16 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP14]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP15]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll index 6e3b2a5390948..ea3ec99cf46e1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll @@ -193,17 +193,17 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr 
[[SRC:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 64 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 64 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 64 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i8>, ptr [[TMP5]], align 64 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 12 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP6]], align 64 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD1]], ptr [[TMP8]], align 64 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD2]], ptr [[TMP9]], align 64 diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index 6605338771c47..78363e13595cb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -39,9 +39,9 @@ define i32 @test_explicit_pred(i64 %len) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp slt <4 x i64> [[STEP_ADD1]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: 
[[TMP7:%.*]] = icmp slt <4 x i64> [[STEP_ADD2]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 4 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP8]], i32 8 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP8]], i32 12 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i64 4 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP8]], i64 8 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP8]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4 @@ -171,9 +171,9 @@ define i32 @test_explicit_pred_generic(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP64]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP69]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 @@ -718,9 +718,9 @@ define i32 @test_max_trip_count(i64 %len, ptr %test_base, i64 %n) { ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 2 ; CHECK-NEXT: [[TMP64:%.*]] = insertelement <4 x i1> 
[[TMP63]], i1 [[TMP60]], i32 3 ; CHECK-NEXT: [[TMP65:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i32 4 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i32 8 -; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i32 12 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i64 4 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i64 8 +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP65]], <4 x i1> [[TMP40]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP48]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP71]], <4 x i1> [[TMP56]], <4 x i32> poison) @@ -877,9 +877,9 @@ define i32 @test_non_zero_start(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP64]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP69]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 @@ -1231,9 +1231,9 @@ define i32 @neg_off_by_many(i64 
%len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1362,9 +1362,9 @@ define i32 @neg_off_by_one_iteration(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], 
<4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1493,9 +1493,9 @@ define i32 @neg_off_by_one_byte(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1633,9 +1633,9 @@ define i32 @test_constant_max(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 2 ; CHECK-NEXT: [[TMP64:%.*]] = insertelement <4 x i1> [[TMP63]], i1 [[TMP60]], i32 3 ; CHECK-NEXT: [[TMP65:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i32 4 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i32 8 -; 
CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i32 12 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i64 4 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i64 8 +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP65]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP71]], align 4 @@ -1793,9 +1793,9 @@ define i32 @test_allocsize(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCATION]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1925,9 +1925,9 @@ define i32 @test_allocsize_array(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] 
= getelementptr i32, ptr [[ALLOCATION]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -2067,9 +2067,9 @@ define i32 @test_allocsize_cond_deref(i1 %allzero, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCATION]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: 
[[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll index 2c172b2aecd16..1d0906902ad62 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll @@ -35,9 +35,9 @@ define i32 @test_scalar_predicated_cost(i64 %x, i64 %y, ptr %A) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = trunc <8 x i64> [[TMP13]] to <8 x i32> ; CHECK-NEXT: [[TMP22:%.*]] = trunc <8 x i64> [[TMP14]] to <8 x i32> ; CHECK-NEXT: [[TMP23:%.*]] = trunc <8 x i64> [[TMP15]] to <8 x i32> -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i32 8 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i32 16 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i32 24 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i64 8 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i64 16 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i64 24 ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP20]], ptr align 4 [[TMP16]], <8 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP9]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP22]], ptr align 4 [[TMP26]], <8 x i1> [[TMP10]]) @@ -199,7 +199,7 @@ define void @test_scalar_cost_single_store_loop_varying_cond(ptr %dst, ptr noali ; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <16 x i32> [[WIDE_VEC4]], <16 x i32> poison, <4 x i32> ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i32> [[STRIDED_VEC]], splat (i32 123) ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <4 x i32> [[STRIDED_VEC5]], splat (i32 123) -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr 
i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[NEXT_GEP]], <4 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP11]], <4 x i1> [[TMP9]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll index 932153a23bdbd..e4977ee642b09 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -72,9 +72,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 @@ -84,9 +84,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = 
getelementptr i32, ptr [[TMP12]], i32 8 -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x i32> poison) @@ -96,9 +96,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP19:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX2-NEXT: [[TMP20:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX2-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i32 8 -; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i32 16 -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i32 24 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i64 8 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i64 16 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP17]], ptr align 4 [[TMP21]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP18]], ptr align 4 [[TMP23]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP19]], ptr align 4 [[TMP24]], <8 x i1> [[TMP10]]) @@ -151,9 +151,9 @@ define void @foo1(ptr nocapture %A, ptr 
nocapture readonly %B, ptr nocapture rea ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP6]], align 4 @@ -163,9 +163,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 
[[TMP14]], <16 x i1> [[TMP9]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x i32> poison) @@ -175,9 +175,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP19:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX512-NEXT: [[TMP20:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX512-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i32 16 -; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i32 32 -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i32 48 +; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i64 16 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i64 32 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP17]], ptr align 4 [[TMP21]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP18]], ptr align 4 [[TMP23]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP19]], ptr align 4 [[TMP24]], <16 x i1> [[TMP10]]) @@ -293,9 +293,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds 
i32, ptr addrspace(1) [[TMP3]], i64 8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP6]], align 4 @@ -305,9 +305,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr addrspace(1) [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 8 -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x i32> poison) @@ -317,9 +317,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2-NEXT: [[TMP19:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX2-NEXT: 
[[TMP20:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX2-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr addrspace(1) [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 8 -; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 16 -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 24 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 8 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 16 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP17]], ptr addrspace(1) align 4 [[TMP21]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP18]], ptr addrspace(1) align 4 [[TMP23]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP19]], ptr addrspace(1) align 4 [[TMP24]], <8 x i1> [[TMP10]]) @@ -372,9 +372,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr 
addrspace(1) [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr addrspace(1) [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr addrspace(1) [[TMP6]], align 4 @@ -384,9 +384,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr addrspace(1) [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP14]], <16 x i1> [[TMP9]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x i32> poison) @@ -396,9 +396,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512-NEXT: [[TMP19:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX512-NEXT: [[TMP20:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX512-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr addrspace(1) [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) 
[[TMP21]], i32 16 -; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 32 -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 48 +; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 16 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 32 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP17]], ptr addrspace(1) align 4 [[TMP21]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP18]], ptr addrspace(1) align 4 [[TMP23]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP19]], ptr addrspace(1) align 4 [[TMP24]], <16 x i1> [[TMP10]]) @@ -524,9 +524,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 @@ -536,9 +536,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> 
[[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i32 8 -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x float> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x float> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x float> poison) @@ -552,9 +552,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP23:%.*]] = fadd <8 x float> [[WIDE_MASKED_LOAD9]], [[TMP19]] ; AVX2-NEXT: [[TMP24:%.*]] = fadd <8 x float> [[WIDE_MASKED_LOAD10]], [[TMP20]] ; AVX2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i32 8 -; AVX2-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i32 16 -; AVX2-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i32 24 +; AVX2-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i64 8 +; AVX2-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i64 16 +; AVX2-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8f32.p0(<8 x float> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void 
@llvm.masked.store.v8f32.p0(<8 x float> [[TMP22]], ptr align 4 [[TMP27]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: call void @llvm.masked.store.v8f32.p0(<8 x float> [[TMP23]], ptr align 4 [[TMP28]], <8 x i1> [[TMP10]]) @@ -608,9 +608,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP6]], align 4 @@ -620,9 +620,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr 
float, ptr [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x float> @llvm.masked.load.v16f32.p0(ptr align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x float> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x float> @llvm.masked.load.v16f32.p0(ptr align 4 [[TMP14]], <16 x i1> [[TMP9]], <16 x float> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x float> @llvm.masked.load.v16f32.p0(ptr align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x float> poison) @@ -636,9 +636,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP23:%.*]] = fadd <16 x float> [[WIDE_MASKED_LOAD9]], [[TMP19]] ; AVX512-NEXT: [[TMP24:%.*]] = fadd <16 x float> [[WIDE_MASKED_LOAD10]], [[TMP20]] ; AVX512-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i32 16 -; AVX512-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i32 32 -; AVX512-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i32 48 +; AVX512-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i64 16 +; AVX512-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i64 32 +; AVX512-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP21]], ptr align 4 [[TMP25]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP22]], ptr align 4 [[TMP27]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP23]], ptr align 4 [[TMP28]], <16 x i1> [[TMP10]]) @@ -732,25 +732,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX1: [[VECTOR_BODY]]: ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = 
getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META8:![0-9]+]] -; AVX1-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META8]] -; AVX1-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META8]] -; AVX1-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META8]] ; AVX1-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX1-NEXT: [[TMP7:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX1-NEXT: [[TMP8:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX1-NEXT: [[TMP9:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX1-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 4 -; AVX1-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX1-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 12 +; AVX1-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 4 +; AVX1-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX1-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 
x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP10]], <4 x i1> [[TMP6]], <4 x double> poison), !alias.scope [[META11:![0-9]+]] ; AVX1-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP12]], <4 x i1> [[TMP7]], <4 x double> poison), !alias.scope [[META11]] ; AVX1-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP8]], <4 x double> poison), !alias.scope [[META11]] -; AVX1-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP14]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META11]] +; AVX1-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP11]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META11]] ; AVX1-NEXT: [[TMP15:%.*]] = sitofp <4 x i32> [[WIDE_LOAD]] to <4 x double> ; AVX1-NEXT: [[TMP16:%.*]] = sitofp <4 x i32> [[WIDE_LOAD6]] to <4 x double> ; AVX1-NEXT: [[TMP17:%.*]] = sitofp <4 x i32> [[WIDE_LOAD7]] to <4 x double> @@ -760,13 +760,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX1-NEXT: [[TMP21:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX1-NEXT: [[TMP22:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX1-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 4 -; AVX1-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX1-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 12 +; AVX1-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; AVX1-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX1-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP19]], ptr align 8 [[TMP23]], <4 x i1> [[TMP6]]), !alias.scope [[META13:![0-9]+]], 
!noalias [[META15:![0-9]+]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP25]], <4 x i1> [[TMP7]]), !alias.scope [[META13]], !noalias [[META15]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP26]], <4 x i1> [[TMP8]]), !alias.scope [[META13]], !noalias [[META15]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP27]], <4 x i1> [[TMP9]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP24]], <4 x i1> [[TMP7]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP25]], <4 x i1> [[TMP8]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP26]], <4 x i1> [[TMP9]]), !alias.scope [[META13]], !noalias [[META15]] ; AVX1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX1-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; AVX1-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] @@ -795,25 +795,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
i32, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META12:![0-9]+]] -; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] -; AVX2-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] -; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] ; AVX2-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX2-NEXT: [[TMP7:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP8:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP9:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX2-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 4 -; AVX2-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 12 +; AVX2-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 4 +; AVX2-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX2-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP10]], <4 x i1> [[TMP6]], <4 x double> poison), !alias.scope [[META15:![0-9]+]] ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP12]], <4 x i1> [[TMP7]], <4 x double> poison), !alias.scope [[META15]] ; AVX2-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr 
align 8 [[TMP13]], <4 x i1> [[TMP8]], <4 x double> poison), !alias.scope [[META15]] -; AVX2-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP14]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META15]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP11]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META15]] ; AVX2-NEXT: [[TMP15:%.*]] = sitofp <4 x i32> [[WIDE_LOAD]] to <4 x double> ; AVX2-NEXT: [[TMP16:%.*]] = sitofp <4 x i32> [[WIDE_LOAD6]] to <4 x double> ; AVX2-NEXT: [[TMP17:%.*]] = sitofp <4 x i32> [[WIDE_LOAD7]] to <4 x double> @@ -823,13 +823,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP21:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX2-NEXT: [[TMP22:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX2-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 4 -; AVX2-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX2-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 12 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX2-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 12 ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP19]], ptr align 8 [[TMP23]], <4 x i1> [[TMP6]]), !alias.scope [[META17:![0-9]+]], !noalias [[META19:![0-9]+]] -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP25]], <4 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP26]], <4 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX2-NEXT: call void 
@llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP27]], <4 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP24]], <4 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP25]], <4 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP26]], <4 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] ; AVX2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX2-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; AVX2-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] @@ -860,25 +860,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META12:![0-9]+]] -; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] -; AVX512-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] -; AVX512-NEXT: 
[[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] ; AVX512-NEXT: [[TMP6:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX512-NEXT: [[TMP7:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP8:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP9:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX512-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX512-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 16 -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 24 +; AVX512-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX512-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 16 +; AVX512-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP10]], <8 x i1> [[TMP6]], <8 x double> poison), !alias.scope [[META15:![0-9]+]] ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP12]], <8 x i1> [[TMP7]], <8 x double> poison), !alias.scope [[META15]] ; AVX512-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP13]], <8 x i1> [[TMP8]], <8 x double> poison), !alias.scope [[META15]] -; AVX512-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP14]], <8 x i1> [[TMP9]], <8 x double> poison), !alias.scope [[META15]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = 
call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP11]], <8 x i1> [[TMP9]], <8 x double> poison), !alias.scope [[META15]] ; AVX512-NEXT: [[TMP15:%.*]] = sitofp <8 x i32> [[WIDE_LOAD]] to <8 x double> ; AVX512-NEXT: [[TMP16:%.*]] = sitofp <8 x i32> [[WIDE_LOAD6]] to <8 x double> ; AVX512-NEXT: [[TMP17:%.*]] = sitofp <8 x i32> [[WIDE_LOAD7]] to <8 x double> @@ -888,13 +888,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP21:%.*]] = fadd <8 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX512-NEXT: [[TMP22:%.*]] = fadd <8 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX512-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX512-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 16 -; AVX512-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 24 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 16 +; AVX512-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP19]], ptr align 8 [[TMP23]], <8 x i1> [[TMP6]]), !alias.scope [[META17:![0-9]+]], !noalias [[META19:![0-9]+]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP20]], ptr align 8 [[TMP25]], <8 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP21]], ptr align 8 [[TMP26]], <8 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP22]], ptr align 8 [[TMP27]], <8 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP20]], ptr align 8 [[TMP24]], <8 x i1> [[TMP7]]), !alias.scope [[META17]], 
!noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP21]], ptr align 8 [[TMP25]], <8 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP22]], ptr align 8 [[TMP26]], <8 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] ; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; AVX512-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 9984 ; AVX512-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] @@ -1117,68 +1117,68 @@ define void @foo6(ptr nocapture readonly %in, ptr nocapture %out, i32 %size, ptr ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 4095, [[INDEX]] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -3 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -4 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 -3 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -8 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 -3 -; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -12 -; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -3 -; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META22:![0-9]+]] +; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 0 +; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 -3 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 -3 +; AVX2-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 -3 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -12 +; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i64 -3 +; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META22:![0-9]+]] ; AVX2-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE7:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD6]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE9:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD8]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE11:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD10]], <4 x i32> poison, <4 x i32> ; AVX2-NEXT: [[TMP10:%.*]] = icmp sgt <4 x i32> [[REVERSE]], zeroinitializer ; AVX2-NEXT: [[TMP11:%.*]] = icmp sgt <4 x i32> [[REVERSE7]], zeroinitializer ; AVX2-NEXT: [[TMP12:%.*]] = icmp sgt <4 x i32> [[REVERSE9]], zeroinitializer ; AVX2-NEXT: [[TMP13:%.*]] = icmp sgt <4 x i32> [[REVERSE11]], zeroinitializer ; AVX2-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[IN]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP14]], i32 0 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP15]], i32 -3 -; AVX2-NEXT: [[TMP17:%.*]] = getelementptr double, ptr 
[[TMP14]], i32 -4 -; AVX2-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP17]], i32 -3 -; AVX2-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP14]], i32 -8 -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 -3 -; AVX2-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP14]], i32 -12 -; AVX2-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP21]], i32 -3 +; AVX2-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP14]], i64 0 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP22]], i64 -3 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP14]], i64 -4 +; AVX2-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i64 -3 +; AVX2-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP14]], i64 -8 +; AVX2-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP18]], i64 -3 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP14]], i64 -12 +; AVX2-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP20]], i64 -3 ; AVX2-NEXT: [[REVERSE12:%.*]] = shufflevector <4 x i1> [[TMP10]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP16]], <4 x i1> [[REVERSE12]], <4 x double> poison), !alias.scope [[META25:![0-9]+]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP15]], <4 x i1> [[REVERSE12]], <4 x double> poison), !alias.scope [[META25:![0-9]+]] ; AVX2-NEXT: [[REVERSE13:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[REVERSE14:%.*]] = shufflevector <4 x i1> [[TMP11]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP18]], <4 x i1> [[REVERSE14]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP17]], <4 x i1> [[REVERSE14]], <4 x double> poison), 
!alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE16:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD15]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[REVERSE17:%.*]] = shufflevector <4 x i1> [[TMP12]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP20]], <4 x i1> [[REVERSE17]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP19]], <4 x i1> [[REVERSE17]], <4 x double> poison), !alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE19:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD18]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[REVERSE20:%.*]] = shufflevector <4 x i1> [[TMP13]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP22]], <4 x i1> [[REVERSE20]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP21]], <4 x i1> [[REVERSE20]], <4 x double> poison), !alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE22:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD21]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[TMP23:%.*]] = fadd <4 x double> [[REVERSE13]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP24:%.*]] = fadd <4 x double> [[REVERSE16]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP25:%.*]] = fadd <4 x double> [[REVERSE19]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP26:%.*]] = fadd <4 x double> [[REVERSE22]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[OUT]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP27]], i32 0 -; AVX2-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 -3 -; AVX2-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP27]], i32 -4 -; AVX2-NEXT: [[TMP31:%.*]] = getelementptr double, 
ptr [[TMP30]], i32 -3 -; AVX2-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP27]], i32 -8 -; AVX2-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP32]], i32 -3 -; AVX2-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP27]], i32 -12 -; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 -3 +; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP27]], i64 0 +; AVX2-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP35]], i64 -3 +; AVX2-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP27]], i64 -4 +; AVX2-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP29]], i64 -3 +; AVX2-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP27]], i64 -8 +; AVX2-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i64 -3 +; AVX2-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP27]], i64 -12 +; AVX2-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP33]], i64 -3 ; AVX2-NEXT: [[REVERSE24:%.*]] = shufflevector <4 x double> [[TMP23]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE24]], ptr align 8 [[TMP29]], <4 x i1> [[REVERSE12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META29:![0-9]+]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE24]], ptr align 8 [[TMP28]], <4 x i1> [[REVERSE12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META29:![0-9]+]] ; AVX2-NEXT: [[REVERSE26:%.*]] = shufflevector <4 x double> [[TMP24]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE26]], ptr align 8 [[TMP31]], <4 x i1> [[REVERSE14]]), !alias.scope [[META27]], !noalias [[META29]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE26]], ptr align 8 [[TMP30]], <4 x i1> [[REVERSE14]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[REVERSE28:%.*]] = shufflevector <4 x double> [[TMP25]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x 
double> [[REVERSE28]], ptr align 8 [[TMP33]], <4 x i1> [[REVERSE17]]), !alias.scope [[META27]], !noalias [[META29]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE28]], ptr align 8 [[TMP32]], <4 x i1> [[REVERSE17]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[REVERSE30:%.*]] = shufflevector <4 x double> [[TMP26]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE30]], ptr align 8 [[TMP35]], <4 x i1> [[REVERSE20]]), !alias.scope [[META27]], !noalias [[META29]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE30]], ptr align 8 [[TMP34]], <4 x i1> [[REVERSE20]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 ; AVX2-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] @@ -1208,68 +1208,68 @@ define void @foo6(ptr nocapture readonly %in, ptr nocapture %out, i32 %size, ptr ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[OFFSET_IDX:%.*]] = sub i64 4095, [[INDEX]] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -7 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -8 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 -7 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -16 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 -7 -; AVX512-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -24 -; AVX512-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -7 -; AVX512-NEXT: 
[[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META34:![0-9]+]] +; AVX512-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 0 +; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 -7 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 -7 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 -7 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -24 +; AVX512-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i64 -7 +; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META34:![0-9]+]] ; AVX512-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE7:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD6]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP7]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE9:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD8]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE11:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD10]], <8 x i32> poison, <8 x i32> ; AVX512-NEXT: [[TMP10:%.*]] = icmp sgt <8 x i32> [[REVERSE]], zeroinitializer ; AVX512-NEXT: [[TMP11:%.*]] = icmp sgt <8 x i32> [[REVERSE7]], 
zeroinitializer ; AVX512-NEXT: [[TMP12:%.*]] = icmp sgt <8 x i32> [[REVERSE9]], zeroinitializer ; AVX512-NEXT: [[TMP13:%.*]] = icmp sgt <8 x i32> [[REVERSE11]], zeroinitializer ; AVX512-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[IN]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP14]], i32 0 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP15]], i32 -7 -; AVX512-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP14]], i32 -8 -; AVX512-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP17]], i32 -7 -; AVX512-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP14]], i32 -16 -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 -7 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP14]], i32 -24 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP21]], i32 -7 +; AVX512-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP14]], i64 0 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP22]], i64 -7 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP14]], i64 -8 +; AVX512-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i64 -7 +; AVX512-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP14]], i64 -16 +; AVX512-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP18]], i64 -7 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP14]], i64 -24 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP20]], i64 -7 ; AVX512-NEXT: [[REVERSE12:%.*]] = shufflevector <8 x i1> [[TMP10]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP16]], <8 x i1> [[REVERSE12]], <8 x double> poison), !alias.scope [[META37:![0-9]+]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP15]], <8 x i1> [[REVERSE12]], <8 x double> poison), !alias.scope [[META37:![0-9]+]] ; AVX512-NEXT: [[REVERSE13:%.*]] = 
shufflevector <8 x double> [[WIDE_MASKED_LOAD]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE14:%.*]] = shufflevector <8 x i1> [[TMP11]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP18]], <8 x i1> [[REVERSE14]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP17]], <8 x i1> [[REVERSE14]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE16:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD15]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE17:%.*]] = shufflevector <8 x i1> [[TMP12]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP20]], <8 x i1> [[REVERSE17]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP19]], <8 x i1> [[REVERSE17]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE19:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD18]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE20:%.*]] = shufflevector <8 x i1> [[TMP13]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP22]], <8 x i1> [[REVERSE20]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP21]], <8 x i1> [[REVERSE20]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE22:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD21]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[TMP23:%.*]] = fadd <8 x double> [[REVERSE13]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP24:%.*]] = fadd <8 x double> [[REVERSE16]], splat (double 
5.000000e-01) ; AVX512-NEXT: [[TMP25:%.*]] = fadd <8 x double> [[REVERSE19]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP26:%.*]] = fadd <8 x double> [[REVERSE22]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[OUT]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP27]], i32 0 -; AVX512-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 -7 -; AVX512-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP27]], i32 -8 -; AVX512-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP30]], i32 -7 -; AVX512-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP27]], i32 -16 -; AVX512-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP32]], i32 -7 -; AVX512-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP27]], i32 -24 -; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 -7 +; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP27]], i64 0 +; AVX512-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP35]], i64 -7 +; AVX512-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP27]], i64 -8 +; AVX512-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP29]], i64 -7 +; AVX512-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP27]], i64 -16 +; AVX512-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i64 -7 +; AVX512-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP27]], i64 -24 +; AVX512-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP33]], i64 -7 ; AVX512-NEXT: [[REVERSE24:%.*]] = shufflevector <8 x double> [[TMP23]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE24]], ptr align 8 [[TMP29]], <8 x i1> [[REVERSE12]]), !alias.scope [[META39:![0-9]+]], !noalias [[META41:![0-9]+]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE24]], ptr align 8 [[TMP28]], <8 x i1> [[REVERSE12]]), !alias.scope [[META39:![0-9]+]], !noalias [[META41:![0-9]+]] ; 
AVX512-NEXT: [[REVERSE26:%.*]] = shufflevector <8 x double> [[TMP24]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE26]], ptr align 8 [[TMP31]], <8 x i1> [[REVERSE14]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE26]], ptr align 8 [[TMP30]], <8 x i1> [[REVERSE14]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[REVERSE28:%.*]] = shufflevector <8 x double> [[TMP25]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE28]], ptr align 8 [[TMP33]], <8 x i1> [[REVERSE17]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE28]], ptr align 8 [[TMP32]], <8 x i1> [[REVERSE17]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[REVERSE30:%.*]] = shufflevector <8 x double> [[TMP26]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE30]], ptr align 8 [[TMP35]], <8 x i1> [[REVERSE20]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE30]], ptr align 8 [[TMP34]], <8 x i1> [[REVERSE20]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; AVX512-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 ; AVX512-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] @@ -1332,9 +1332,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1: [[VECTOR_BODY]]: ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; 
AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX1-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1348,9 +1348,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX1-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX1-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 4 -; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 8 -; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 12 +; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 4 +; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 8 +; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1364,9 +1364,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP34:%.*]] = 
select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1424,9 +1424,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1440,9 
+1440,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX2-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX2-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 4 -; AVX2-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX2-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 12 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 4 +; AVX2-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX2-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP18]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1456,9 +1456,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX2-NEXT: 
call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1516,9 +1516,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 ; AVX512-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP3]], align 1 ; AVX512-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1 @@ -1532,9 +1532,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP16:%.*]] = icmp ne <8 x i8> [[TMP8]], zeroinitializer ; AVX512-NEXT: [[TMP17:%.*]] = icmp ne <8 x i8> [[TMP9]], zeroinitializer ; AVX512-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 16 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 24 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr 
ptr, ptr [[TMP18]], i64 8 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 16 +; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP18]], <8 x i1> [[TMP14]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP20]], <8 x i1> [[TMP15]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP21]], <8 x i1> [[TMP16]], <8 x ptr> poison) @@ -1548,9 +1548,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP33:%.*]] = select <8 x i1> [[TMP16]], <8 x i1> [[TMP29]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP34:%.*]] = select <8 x i1> [[TMP17]], <8 x i1> [[TMP30]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 16 -; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 24 +; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 16 +; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <8 x i1> [[TMP31]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <8 x i1> [[TMP32]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <8 x i1> [[TMP33]]) @@ -1653,9 +1653,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1: [[VECTOR_BODY]]: ; 
AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX1-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1669,9 +1669,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX1-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX1-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 4 -; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 12 +; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 4 +; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP18]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> 
[[TMP16]], <4 x ptr> poison) @@ -1685,9 +1685,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1745,9 +1745,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr 
inbounds i8, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1761,9 +1761,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX2-NEXT: [[TMP12:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX2-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 4 -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 8 -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 12 +; AVX2-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 4 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 8 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP17]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP19]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1777,9 +1777,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP12]], <4 x i1> [[TMP21]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, 
ptr [[TMP35]], i32 12 +; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1837,9 +1837,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 ; AVX512-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP3]], align 1 ; AVX512-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1 @@ -1853,9 +1853,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP16:%.*]] = icmp ne <8 x i8> [[TMP8]], zeroinitializer ; AVX512-NEXT: [[TMP17:%.*]] = icmp ne <8 x i8> [[TMP9]], zeroinitializer ; AVX512-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; 
AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 16 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 24 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 16 +; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP18]], <8 x i1> [[TMP14]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP20]], <8 x i1> [[TMP15]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP21]], <8 x i1> [[TMP16]], <8 x ptr> poison) @@ -1869,9 +1869,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP33:%.*]] = select <8 x i1> [[TMP16]], <8 x i1> [[TMP29]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP34:%.*]] = select <8 x i1> [[TMP17]], <8 x i1> [[TMP30]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 16 -; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 24 +; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 16 +; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <8 x i1> [[TMP31]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <8 x i1> [[TMP32]]) ; 
AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <8 x i1> [[TMP33]]) diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll index e23f8a9b63ef0..d514ab6bc72b7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll @@ -1186,13 +1186,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: vector.body: ; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]] -; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4 +; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 4 ; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; O1VEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] ; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]] -; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4 +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4 ; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1214,13 +1214,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: vector.body: ; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]] -; 
OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4 +; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 4 ; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; OzVEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] ; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]] -; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4 +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4 ; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll index de6418066dea0..2809a77b36f1a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll @@ -30,8 +30,8 @@ define i64 @test_value_in_exit_compare_chain_used_outside(ptr %src, i64 %x, i64 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP10]], 1 ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP18]] -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0 -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i32 -7 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP26]], i64 0 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP12]], i64 -7 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP28]], align 1 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i8> [[WIDE_LOAD]], <8 x i8> 
poison, <8 x i32> ; CHECK-NEXT: [[TMP29]] = xor <8 x i8> [[REVERSE]], [[VEC_PHI]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll index 31269b1b8c221..85d77eaadc632 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll @@ -35,17 +35,17 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT12]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 4 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 8 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 12 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 8 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP5]], align 8, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP6]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP7]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP8]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT10]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(1), ptr 
addrspace(1) [[TMP9]], i32 4 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i32 8 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i32 12 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 12 ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD]], ptr addrspace(1) [[TMP9]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]] ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD4]], ptr addrspace(1) [[TMP10]], align 8, !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD5]], ptr addrspace(1) [[TMP11]], align 8, !alias.scope [[META3]], !noalias [[META0]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll index 3c618d71fc974..9217c905945ac 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll @@ -67,7 +67,7 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[DOTPROMOTED]], [[INDEX]] ; CHECK-NEXT: [[TMP20:%.*]] = add i32 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[TMP20]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 4 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP22]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP25]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll b/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll index 
878d288b918e4..4f8975f3d5e84 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll @@ -1,75 +1,71 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 ; RUN: opt -passes=loop-vectorize -S < %s 2>&1 | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1" target triple = "x86_64-unknown-linux-gnu" ; Make sure that we can compile the test without crash. -define void @barney(ptr %dst, i1 %arg) { - -; CHECK-LABEL: @barney( -; CHECK: middle.block: - -bb: - br label %bb2 - -bb2: ; preds = %bb2, %bb - %tmp4 = icmp slt i32 undef, 0 - br i1 %tmp4, label %bb2, label %bb5 - -bb5: ; preds = %bb2 - br label %bb19 - -bb18: ; preds = %bb33 - ret void - -bb19: ; preds = %bb36, %bb5 - %tmp21 = phi i64 [ undef, %bb36 ], [ 2, %bb5 ] - %tmp22 = phi i32 [ %tmp65, %bb36 ], [ undef, %bb5 ] - br label %bb50 - -bb33: ; preds = %bb62 - br i1 %arg, label %bb18, label %bb36 - -bb36: ; preds = %bb33 - br label %bb19 - -bb46: ; preds = %bb50 - br i1 %arg, label %bb48, label %bb59 - -bb48: ; preds = %bb46 - %tmp49 = add i32 %tmp52, 14 - ret void - -bb50: ; preds = %bb50, %bb19 - %tmp52 = phi i32 [ %tmp55, %bb50 ], [ %tmp22, %bb19 ] - %tmp53 = phi i64 [ %tmp56, %bb50 ], [ 1, %bb19 ] - %gep = getelementptr inbounds i8, ptr %dst, i64 %tmp53 - store i8 1, ptr %gep - %tmp54 = add i32 %tmp52, 12 - %tmp55 = add i32 %tmp52, 13 - %tmp56 = add nuw nsw i64 %tmp53, 1 - %tmp58 = icmp ult i64 %tmp53, undef - br i1 %tmp58, label %bb50, label %bb46 - -bb59: ; preds = %bb46 - br label %bb62 - -bb62: ; preds = %bb68, %bb59 - %tmp63 = phi i32 [ %tmp65, %bb68 ], [ %tmp55, %bb59 ] - %tmp64 = phi i64 [ %tmp66, %bb68 ], [ %tmp56, %bb59 ] - %tmp65 = add i32 %tmp63, 13 - %tmp66 = add nuw nsw i64 %tmp64, 1 - %tmp67 = icmp ult i64 %tmp66, %tmp21 - br i1 %tmp67, label %bb68, label %bb33 - -bb68: ; preds = %bb62 - br label %bb62 -} define i32 @foo(ptr addrspace(1) %p) { 
- -; CHECK-LABEL: foo -; CHECK: middle.block: +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[OUTER:.*]] +; CHECK: [[OUTER]]: +; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ [[INDVAR_NEXT:%.*]], %[[OUTER_LATCH:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[OUTER_LATCH]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDVAR]], 1 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 1, [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[N_VEC]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 6, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 8) +; CHECK-NEXT: [[TMP4]] = or <4 x i32> [[VEC_PHI]], [[VEC_IND]] +; CHECK-NEXT: [[TMP5]] = or <4 x i32> [[VEC_PHI1]], [[STEP_ADD]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD]], splat (i32 8) +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: 
[[BIN_RDX:%.*]] = or <4 x i32> [[TMP5]], [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[OUTER_LATCH]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[OUTER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ 1, %[[OUTER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ 6, %[[OUTER]] ] +; CHECK-NEXT: br label %[[INNER:.*]] +; CHECK: [[INNER]]: +; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP10:%.*]], %[[INNER]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[TMP11:%.*]], %[[INNER]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[TMP9:%.*]], %[[INNER]] ], [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[TMP9]] = add i32 [[B]], 2 +; CHECK-NEXT: [[TMP10]] = or i32 [[TMP8]], [[B]] +; CHECK-NEXT: [[TMP11]] = add nuw nsw i32 [[A]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[IV]], [[TMP12]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[INNER]], label %[[OUTER_LATCH]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[OUTER_LATCH]]: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP10]], %[[INNER]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: store atomic i32 [[DOTLCSSA]], ptr addrspace(1) [[P]] unordered, align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[IV]], 63 +; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1 +; CHECK-NEXT: br i1 [[TMP14]], label %[[EXIT:.*]], label %[[OUTER]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i32 0 +; entry: br label %outer diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll index 
737bcf35fbd2c..38db41271d1f6 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll @@ -124,7 +124,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; SSE41-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[TMP22]], [[TMP16]] ; SSE41-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[TMP23]], [[TMP17]] ; SSE41-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[D1:%.*]], i64 [[INDEX]] -; SSE41-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i32 4 +; SSE41-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 4 ; SSE41-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; SSE41-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP29]], align 4 ; SSE41-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -250,9 +250,9 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; AVX1-NEXT: [[TMP69:%.*]] = add nsw <4 x i32> [[TMP67]], [[TMP46]] ; AVX1-NEXT: [[TMP70:%.*]] = add nsw <4 x i32> [[TMP68]], [[TMP47]] ; AVX1-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[D1:%.*]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 4 -; AVX1-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 8 -; AVX1-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 12 +; AVX1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 4 +; AVX1-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 8 +; AVX1-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 12 ; AVX1-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; AVX1-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP26]], align 4 ; AVX1-NEXT: store <4 x i32> [[TMP69]], ptr [[TMP71]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll index 08855fe9ecba5..c756a54ec6d2b 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll @@ -30,8 +30,8 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[ARR]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i32 0 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i64 0 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i64 -3 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i1> [[TMP4]], <4 x i1> poison, <4 x i32> ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> splat (i64 1), ptr align 8 [[TMP8]], <4 x i1> [[REVERSE]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll index 2aceb279d47db..5a396f88b1a64 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll @@ -76,7 +76,7 @@ define void @switch_default_to_latch_common_dest(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) 
@@ -214,7 +214,7 @@ define void @switch_default_to_latch_common_dest_using_branches(ptr %start, ptr ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -337,7 +337,7 @@ define void @switch_all_dests_distinct(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -527,7 +527,7 @@ define void @switch_all_dests_distinct_variant_using_branches(ptr %start, ptr %e ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], 
align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -687,7 +687,7 @@ define void @switch_multiple_common_dests(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP23:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -836,7 +836,7 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP15:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -1014,7 +1014,7 @@ define void @switch_under_br_default_common_dest_with_case(ptr %start, ptr %end, ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr 
[[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp ule <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -1167,7 +1167,7 @@ define void @br_under_switch_default_common_dest_with_case(ptr %start, ptr %end, ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -1319,7 +1319,7 @@ define void @large_number_of_cases(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 1) diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll index 52e90e4475208..3afdf947081b6 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll @@ -60,7 +60,7 @@ define float @reduction_sum_float_fastmath(i32 %n, ptr %array) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd fast <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -111,7 +111,7 @@ define float @reduction_sum_float_only_reassoc(i32 %n, ptr %array) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd reassoc <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -162,7 +162,7 @@ define float @reduction_sum_float_only_reassoc_and_contract(i32 %n, ptr %array) ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float 
-0.000000e+00), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd reassoc contract <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -220,7 +220,7 @@ define float @PR35538(ptr nocapture readonly %a, i32 %N) #0 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = fcmp nnan ninf nsz oge <4 x float> [[WIDE_LOAD]], [[VEC_PHI]] @@ -301,7 +301,7 @@ define float @PR35538_more_FMF(ptr nocapture readonly %a, i32 %N) #0 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 
x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = fcmp nnan ninf oge <4 x float> [[WIDE_LOAD]], [[VEC_PHI]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll index f44d3008cbaa5..3813560d9300a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll @@ -587,10 +587,10 @@ define double @test_load_used_by_other_load_scev_low_trip_count(ptr %ptr.a, ptr ; I64-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[TMP7]] ; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[PTR_A]], align 8 ; I64-NEXT: [[ADD1:%.*]] = fadd double [[TMP10]], 0.000000e+00 -; I64-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 -; I64-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP13]], align 8 +; I64-NEXT: [[GEP_C_OFFSET:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 +; I64-NEXT: [[LOAD_C:%.*]] = load double, ptr [[GEP_C_OFFSET]], align 8 ; I64-NEXT: [[MUL1]] = fmul double [[ADD1]], 0.000000e+00 -; I64-NEXT: [[MUL2:%.*]] = fmul double [[TMP15]], 0.000000e+00 +; I64-NEXT: [[MUL2:%.*]] = fmul double [[LOAD_C]], 0.000000e+00 ; I64-NEXT: [[ADD2:%.*]] = fadd double [[MUL2]], 0.000000e+00 ; I64-NEXT: [[ADD3:%.*]] = fadd double [[ADD2]], 1.000000e+00 ; I64-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP9]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index e99ffda9e4043..93cf59c019d5f 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -524,22 +524,78 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; induction is used outside the loop. 
define i64 @example23d(ptr noalias nocapture %src, ptr noalias nocapture %dst) optsize { ; CHECK-LABEL: @example23d( +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: ; CHECK-NEXT: br label [[TMP1:%.*]] -; CHECK: 1: -; CHECK-NEXT: [[DOT04:%.*]] = phi ptr [ [[SRC:%.*]], [[TMP0:%.*]] ], [ [[TMP2:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[DOT013:%.*]] = phi ptr [ [[DST:%.*]], [[TMP0]] ], [ [[TMP6:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP7:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[TMP2]] = getelementptr inbounds nuw i8, ptr [[DOT04]], i64 2 +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE14:%.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[TMP9]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[TMP10]], i64 6 +; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[TMP11]], i64 4 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[TMP32]], i64 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 12 +; CHECK-NEXT: [[TMP33:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 257) +; CHECK-NEXT: [[TMP8:%.*]] = 
extractelement <4 x i1> [[TMP33]], i64 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; CHECK: pred.store.if: +; CHECK-NEXT: [[DOT013:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[DOT04:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[DOT04]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7 -; CHECK-NEXT: [[TMP6]] = getelementptr inbounds nuw i8, ptr [[DOT013]], i64 4 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[DOT013]], align 4 -; CHECK-NEXT: [[TMP7]] = add nuw nsw i64 [[I_02]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP7]], 257 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[TMP8:%.*]], label [[TMP1]] -; CHECK: 8: -; CHECK-NEXT: ret i64 [[TMP7]] +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] +; CHECK: pred.store.continue: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP33]], i64 1 +; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]] +; CHECK: pred.store.if9: +; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP1]], align 2 +; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32 +; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i32 [[TMP14]], 7 +; CHECK-NEXT: store i32 [[TMP15]], ptr [[NEXT_GEP6]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]] +; CHECK: pred.store.continue10: +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP33]], i64 2 +; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]] +; CHECK: pred.store.if11: +; CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr [[NEXT_GEP2]], align 2 +; CHECK-NEXT: [[TMP18:%.*]] = zext i16 [[TMP17]] to i32 +; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i32 [[TMP18]], 7 +; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP7]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]] +; CHECK: pred.store.continue12: 
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP33]], i64 3 +; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.if13: +; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP3]], align 2 +; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32 +; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7 +; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP8]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.continue14: +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[TMP1]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br label [[TMP30:%.*]] +; CHECK: 25: +; CHECK-NEXT: [[TMP25:%.*]] = xor <4 x i1> [[TMP33]], splat (i1 true) +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP25]], i1 false) +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], -1 +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = add nsw i64 [[TMP28]], 1 +; CHECK-NEXT: ret i64 [[TMP29]] ; br label %1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index 602a3921eb34c..da48f984cb329 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -59,13 +59,13 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds 
i32, ptr [[TMP32]], i32 8 -; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 -; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 24 +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 8 +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 16 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP32]], align 4, !tbaa [[INT_TBAA1:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[INT_TBAA1]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[INT_TBAA1]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP33]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP34]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP35]], align 4, !tbaa [[INT_TBAA1]] ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] ; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] ; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] @@ -290,13 +290,13 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; MAX-BW-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; MAX-BW-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP0]] -; MAX-BW-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 8 -; MAX-BW-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 -; MAX-BW-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], 
i32 24 +; MAX-BW-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 8 +; MAX-BW-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 16 +; MAX-BW-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 24 ; MAX-BW-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP32]], align 4, !tbaa [[INT_TBAA1:![0-9]+]] -; MAX-BW-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[INT_TBAA1]] -; MAX-BW-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[INT_TBAA1]] -; MAX-BW-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP33]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP34]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP35]], align 4, !tbaa [[INT_TBAA1]] ; MAX-BW-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] ; MAX-BW-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] ; MAX-BW-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll index 8081c0e17f865..692ab3db0aa42 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll @@ -25,9 +25,9 @@ define void @foo(ptr nocapture noalias %A, i64 %N) #0 { ; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @inc, align 4 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP1]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr 
[[A]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i32 16 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i32 24 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[A]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index fda944e072d4a..714d01315e507 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -29,29 +29,29 @@ define void @vectorized(ptr noalias nocapture %A, ptr noalias nocapture readonly ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: 
[[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP6]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP11:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> 
[[WIDE_LOAD1]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP13:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]] ; CHECK-NEXT: [[TMP14:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]] ; CHECK-NEXT: store <4 x float> [[TMP11]], ptr [[TMP6]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP12]], ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP14]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP12]], ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP14]], ptr [[TMP7]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll index c8e3766aa936e..a792d2463e647 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll @@ -56,17 +56,17 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 16 -; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 32 -; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, 
ptr [[TMP4]], i32 48 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 16 +; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 32 +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 48 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i32>, ptr [[TMP9]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i32>, ptr [[TMP10]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i32>, ptr [[TMP11]], align 4 ; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 16 -; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 32 -; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 48 +; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 16 +; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 32 +; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 48 ; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP12]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP17]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i32>, ptr [[TMP18]], align 4 @@ -76,9 +76,9 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP-NEXT: [[TMP22:%.*]] = add nsw <16 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; NO-VP-NEXT: [[TMP23:%.*]] = add nsw <16 x i32> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 16 -; NO-VP-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 32 -; NO-VP-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 48 +; NO-VP-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, 
ptr [[TMP24]], i64 16 +; NO-VP-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 32 +; NO-VP-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 48 ; NO-VP-NEXT: store <16 x i32> [[TMP20]], ptr [[TMP24]], align 4 ; NO-VP-NEXT: store <16 x i32> [[TMP21]], ptr [[TMP29]], align 4 ; NO-VP-NEXT: store <16 x i32> [[TMP22]], ptr [[TMP30]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll index 8184cad22ae8b..26268f1ff4e94 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll @@ -18,9 +18,9 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i64> [[STEP_ADD]], splat (i64 4) ; CHECK-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i64> [[STEP_ADD_2]], splat (i64 4) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 4 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 8 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i64>, ptr [[TMP10]], align 8 @@ -36,9 +36,9 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP19:%.*]] = icmp ule <4 x i64> [[WIDE_LOAD6]], splat (i64 128) ; 
CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 1 ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP27]] -; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i64, ptr [[TMP28]], i32 4 -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[TMP28]], i32 8 -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP28]], i32 12 +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i64, ptr [[TMP28]], i64 4 +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[TMP28]], i64 8 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP28]], i64 12 ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP12]], ptr align 4 [[TMP28]], <4 x i1> [[TMP16]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP13]], ptr align 4 [[TMP33]], <4 x i1> [[TMP17]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP14]], ptr align 4 [[TMP34]], <4 x i1> [[TMP18]]) @@ -88,9 +88,9 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 4 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 8 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP10]], align 8 @@ -102,9 +102,9 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP19:%.*]] 
= icmp ule <4 x i64> [[WIDE_LOAD3]], splat (i64 128) ; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[TMP15]], 1 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP23]] -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[TMP24]], i32 4 -; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[TMP24]], i32 8 -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[TMP24]], i32 12 +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[TMP24]], i64 4 +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[TMP24]], i64 8 +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[TMP24]], i64 12 ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD]], ptr align 4 [[TMP24]], <4 x i1> [[TMP16]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD1]], ptr align 4 [[TMP29]], <4 x i1> [[TMP17]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD2]], ptr align 4 [[TMP30]], <4 x i1> [[TMP18]]) diff --git a/llvm/test/Transforms/LoopVectorize/assume.ll b/llvm/test/Transforms/LoopVectorize/assume.ll index a9a0b33f542af..eddd5f9ddc584 100644 --- a/llvm/test/Transforms/LoopVectorize/assume.ll +++ b/llvm/test/Transforms/LoopVectorize/assume.ll @@ -11,7 +11,7 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+02) @@ -27,7 +27,7 @@ define void @test1(ptr noalias nocapture %a, ptr 
noalias nocapture readonly %b) ; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -80,13 +80,13 @@ define void @test2(ptr noalias %a, ptr noalias %b) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -151,13 +151,13 @@ define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> 
[[TMP1]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01) ; CHECK-NEXT: [[PREDPHI1:%.*]] = select <2 x i1> [[TMP2]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01) ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x float> [[PREDPHI]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[PREDPHI1]], [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll index 1fe3962dfd072..6c63b823b7666 100644 --- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll +++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll @@ -130,8 +130,8 @@ define i32 @consecutive_ptr_reverse(ptr %a, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr 
[[TMP8]], i32 -3 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 -3 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> ; CHECK-NEXT: [[TMP5]] = add <4 x i32> [[VEC_PHI]], [[REVERSE]] @@ -177,8 +177,8 @@ define i32 @consecutive_ptr_reverse(ptr %a, i64 %n) { ; INTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; INTER-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]] ; INTER-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]] -; INTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 -; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -3 +; INTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 0 +; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 -3 ; INTER-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 8 ; INTER-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> ; INTER-NEXT: [[TMP5]] = add <4 x i32> [[VEC_PHI]], [[REVERSE]] diff --git a/llvm/test/Transforms/LoopVectorize/cse-casts.ll b/llvm/test/Transforms/LoopVectorize/cse-casts.ll index fb45745eff1cb..b6d7a9f81ec9d 100644 --- a/llvm/test/Transforms/LoopVectorize/cse-casts.ll +++ b/llvm/test/Transforms/LoopVectorize/cse-casts.ll @@ -14,12 +14,12 @@ define i8 @preserve_flags_when_cloning_trunc(i8 %start, ptr noalias %src, ptr no ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i8> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i8> [ splat (i8 1), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SRC]], align 4 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = 
insertelement <4 x i32> poison, i32 [[TMP1]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP1]], 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP10]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i1> [[TMP2]] to <4 x i16> ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP4]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP5]], align 2 ; CHECK-NEXT: [[TMP6]] = mul <4 x i8> [[VEC_PHI]], splat (i8 3) diff --git a/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll b/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll index 5d92c127aff93..901652537a5c5 100644 --- a/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll +++ b/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll @@ -16,19 +16,19 @@ define void @cse_replicate_gep(ptr noalias %A, ptr noalias %B, ptr noalias %C, i ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TMP0]], i32 4 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TMP0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[A]], i64 
[[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP8]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP8]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i16>, ptr [[TMP8]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP3]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP3]], align 4 ; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD1]], ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[C]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i16> [[WIDE_LOAD2]], ptr [[TMP5]], align 2 ; CHECK-NEXT: store <4 x i16> [[WIDE_LOAD3]], ptr [[TMP6]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -79,11 +79,11 @@ define void @cse_wide_gep(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %n ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[A]], <4 x i64> [[VEC_IND]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[A]], <4 x i64> [[STEP_ADD]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX1]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x ptr> [[TMP0]], ptr [[TMP4]], align 8 ; CHECK-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX1]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr ptr, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr ptr, ptr [[TMP6]], i64 4 ; CHECK-NEXT: store <4 x ptr> [[TMP2]], ptr [[TMP6]], align 8 ; CHECK-NEXT: store <4 x ptr> [[TMP3]], ptr [[TMP8]], align 8 ; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll index 02e1d0e9e7004..6e5213568c735 100644 --- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll @@ -25,7 +25,7 @@ define i64 @dead_instructions_01(ptr %a, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]] @@ -133,13 +133,13 @@ define void @dead_load_and_vector_pointer(ptr %a, ptr %b) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META8:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP5]], align 8, !alias.scope [[META5]], !noalias [[META8]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META5]], !noalias [[META8]] ; CHECK-NEXT: [[TMP6:%.*]] = add <2 x i32> [[WIDE_LOAD]], splat (i32 1) ; CHECK-NEXT: 
[[TMP7:%.*]] = add <2 x i32> [[WIDE_LOAD2]], splat (i32 1) ; CHECK-NEXT: store <2 x i32> [[TMP6]], ptr [[TMP2]], align 4, !alias.scope [[META5]], !noalias [[META8]] -; CHECK-NEXT: store <2 x i32> [[TMP7]], ptr [[TMP5]], align 4, !alias.scope [[META5]], !noalias [[META8]] +; CHECK-NEXT: store <2 x i32> [[TMP7]], ptr [[TMP1]], align 4, !alias.scope [[META5]], !noalias [[META8]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll index 274bd043cd86b..c23d28cdd0f3a 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll @@ -15,8 +15,8 @@ define dso_local void @constTC(ptr noalias nocapture %A) optsize { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP3]], align 1 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP7]], align 1 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP8]], align 1 diff --git a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll index 4af9f4a13b62b..50e55f6051485 100644 --- 
a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll +++ b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll @@ -26,7 +26,7 @@ define void @test(ptr %dst) personality ptr null { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], [[TMP1]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP4]], align 8 ; CHECK-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll b/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll index 22226a711bcf0..5edd83bd1e0d1 100644 --- a/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll @@ -19,8 +19,8 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC3-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; IC3-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; IC3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 +; IC3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 ; IC3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC3-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x 
float>, ptr [[TMP1]], align 4 ; IC3-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 @@ -71,9 +71,9 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC4-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC4-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 -; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 12 +; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC4-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 +; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 12 ; IC4-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 @@ -129,10 +129,10 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC5-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; IC5-NEXT: [[VEC_PHI4:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] ; IC5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 -; IC5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 12 -; IC5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 16 +; IC5-NEXT: [[TMP1:%.*]] = 
getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 +; IC5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 12 +; IC5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 16 ; IC5-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC5-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IC5-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll index 9269558bd5f65..5cb8bd911df75 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll @@ -180,7 +180,7 @@ define i32 @test_chained_first_order_recurrences_4(ptr %base, i64 %x) { ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: -; CHECK-NEXT: WIDEN ir<%for.x.next> = mul ir<%x>, ir<2> +; CHECK-NEXT: CLONE ir<%for.x.next> = mul ir<%x>, ir<2> ; CHECK-NEXT: Successor(s): vector loop ; CHECK-EMPTY: ; CHECK-NEXT: vector loop: { diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll index eca39e6f0b6ba..cf2e7ccd1b2f0 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll @@ -98,7 +98,7 @@ define i32 @sink_after_dead_inst(ptr %A.ptr) { ; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i16> [[TMP0]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i32 4 +; 
CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP3]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 @@ -160,7 +160,7 @@ define void @sink_dead_inst(ptr %a) { ; CHECK-NEXT: [[TMP7:%.*]] = sub <4 x i16> [[TMP5]], splat (i16 10) ; CHECK-NEXT: [[TMP8:%.*]] = sub <4 x i16> [[TMP6]], splat (i16 10) ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[A]], i16 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP9]], i32 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP9]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP7]], ptr [[TMP9]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP8]], ptr [[TMP11]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll index e97d6e66d9d7a..28b46726f80dc 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll @@ -6,59 +6,276 @@ define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: 
[[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw 
i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP34]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] +; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = 
add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]] +; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0 +; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 [[TMP31]], i32 [[TMP30]], i32 [[TMP29]] +; VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF2IC1-NEXT: ret i32 [[TMP32]] ; ; VF2IC2-LABEL: define i32 @FOR_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 
x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 
[[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label 
%[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP68:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP68]], ptr [[TMP67]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; 
VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP66]] ; ; VF1IC2-LABEL: define i32 @FOR_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; 
VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP32]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label 
%[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF1IC2-NEXT: ret i32 [[TMP30]] ; entry: @@ -83,59 +300,265 @@ for.end: define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_next_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], 
[ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x 
i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP30]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] +; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr 
[[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: [[TMP28:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]] +; VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] ; VF2IC1-NEXT: ret i32 [[TMP28]] ; ; VF2IC2-LABEL: define i32 @FOR_next_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label 
%[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; 
VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: 
store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP64:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP64]], ptr [[TMP63]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP59:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP60:%.*]] = extractelement <2 x i32> [[TMP25]], i64 
[[TMP59]] +; VF2IC2-NEXT: [[TMP61:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP62:%.*]] = select i1 [[TMP61]], i32 [[TMP60]], i32 [[TMP58]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP62]] ; ; VF1IC2-LABEL: define i32 @FOR_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; 
VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP29]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], 
label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = icmp uge i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = select i1 [[TMP26]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] ; VF1IC2-NEXT: ret i32 [[TMP27]] ; entry: @@ -160,64 +583,287 @@ for.end: define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: 
[[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP35:%.*]] = 
load i32, ptr [[TMP34]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP35]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] +; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 
x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]] +; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0 +; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 [[TMP31]], i32 [[TMP30]], i32 [[TMP29]] +; VF2IC1-NEXT: [[TMP33:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]] +; VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] ; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]] ; VF2IC1-NEXT: ret i32 [[RES]] ; ; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement 
<2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br 
i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> 
[[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add 
nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP73:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP73]], ptr [[TMP72]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = 
sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: [[TMP67:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP68:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP69:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP68]] +; VF2IC2-NEXT: [[TMP70:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP69]], i32 [[TMP67]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]] ; VF2IC2-NEXT: ret i32 [[RES]] ; ; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], 
%[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP35]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; 
VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP24]], 1 +; 
VF1IC2-NEXT: [[TMP32:%.*]] = icmp uge i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP33:%.*]] = select i1 [[TMP32]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] ; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]] ; VF1IC2-NEXT: ret i32 [[RES]] ; diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll index cebd52fa7f866..063f47ce2b32d 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll @@ -33,7 +33,7 @@ define void @recurrence_1(ptr readonly noalias %a, ptr noalias %b, i32 %n) { ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]] -; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i32>, ptr [[TMP7]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> @@ -41,7 +41,7 @@ define void @recurrence_1(ptr readonly noalias %a, ptr noalias %b, i32 %n) { ; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] ; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[TMP8]] ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = add <4 x i32> [[WIDE_LOAD1]], [[TMP9]] -; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 4 +; 
UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP10]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -231,7 +231,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ poison, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ poison, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD2]] = load <4 x i32>, ptr [[TMP4]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> @@ -485,7 +485,7 @@ define void @recurrence_3(ptr readonly noalias %a, ptr noalias %b, i32 %n, float ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP4]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP4]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP6]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -499,7 +499,7 @@ define void 
@recurrence_3(ptr readonly noalias %a, ptr noalias %b, i32 %n, float ; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = fsub fast <4 x double> [[TMP9]], [[TMP13]] ; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = fsub fast <4 x double> [[TMP10]], [[TMP14]] ; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x double> [[TMP15]], ptr [[TMP17]], align 8 ; UNROLL-NO-IC-NEXT: store <4 x double> [[TMP16]], ptr [[TMP19]], align 8 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1700,7 +1700,7 @@ define void @sink_after(ptr noalias %a, ptr noalias %b, i64 %n) { ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[TMP1]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -1712,7 +1712,7 @@ define void @sink_after(ptr noalias %a, ptr noalias %b, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP7]] ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP10]], [[TMP8]] ; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = 
getelementptr inbounds i32, ptr [[TMP13]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP13]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP15]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1915,7 +1915,7 @@ define void @PR34711(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP5]], i64 1 ; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP6]], i64 1 ; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP7]], i64 1 -; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> splat (i32 7), ptr [[TMP8]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> splat (i32 7), ptr [[TMP18]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP9]], align 2 @@ -1943,7 +1943,7 @@ define void @PR34711(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = mul nsw <4 x i32> [[TMP39]], [[TMP37]] ; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = mul nsw <4 x i32> [[TMP40]], [[TMP38]] ; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[TMP43]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[TMP43]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP43]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP45]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -2146,7 +2146,7 @@ define void @sink_after_with_multiple_users(ptr noalias %a, ptr noalias %b, i64 ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], 
[[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[TMP1]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -2160,7 +2160,7 @@ define void @sink_after_with_multiple_users(ptr noalias %a, ptr noalias %b, i64 ; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP11]] ; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = mul nsw <4 x i32> [[TMP10]], [[TMP12]] ; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP17]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -2449,7 +2449,7 @@ define void @sink_dead_inst(ptr %a) { ; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = sub <4 x i16> [[TMP6]], splat (i16 10) ; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = sub <4 x i16> [[TMP7]], splat (i16 10) ; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[A:%.*]], i16 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[TMP10]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[TMP10]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i16> [[TMP8]], ptr [[TMP10]], align 2 ; UNROLL-NO-IC-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP12]], align 2 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 @@ 
-3218,7 +3218,7 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = or <4 x i16> [[TMP1]], [[TMP1]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32> ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A_PTR:%.*]], i16 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP6]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll index 0745f286b2608..0d9d28d079b92 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll @@ -53,7 +53,7 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) @@ -127,10 +127,10 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { 
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll index ac767c68e0b25..87942911e915f 100644 --- a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll +++ b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll @@ -21,32 +21,20 @@ define void @test_stores_noalias_via_rt_checks_after_loads(ptr %dst, ptr %src, p ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE17:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE11:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = 
add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META3:![0-9]+]] -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP12]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META3:![0-9]+]] ; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META3]] +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP9]], i32 0 ; CHECK-NEXT: [[TMP17:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP16]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP18:%.*]] = phi <2 x i32> [ [[TMP13]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP17]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP18]], splat (i32 5) +; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP17]], splat (i32 5) ; CHECK-NEXT: [[TMP20:%.*]] 
= extractelement <2 x i1> [[TMP8]], i32 0 ; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] ; CHECK: [[PRED_STORE_IF]]: @@ -56,48 +44,30 @@ define void @test_stores_noalias_via_rt_checks_after_loads(ptr %dst, ptr %src, p ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] ; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]] -; CHECK: [[PRED_STORE_IF8]]: +; CHECK-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7:.*]] +; CHECK: [[PRED_STORE_IF6]]: ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i32> [[TMP19]], i32 1 ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE9]] -; CHECK: [[PRED_STORE_CONTINUE9]]: -; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11:.*]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <2 x i32> poison, i32 [[TMP28]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP30:%.*]] = phi <2 x i32> [ poison, %[[PRED_STORE_CONTINUE9]] ], [ [[TMP29]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[TMP31:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP31]], label %[[PRED_LOAD_IF12:.*]], label %[[PRED_LOAD_CONTINUE13:.*]] -; CHECK: [[PRED_LOAD_IF12]]: -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP33:%.*]] = load i32, 
ptr [[TMP32]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP34:%.*]] = insertelement <2 x i32> [[TMP30]], i32 [[TMP33]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE13]] -; CHECK: [[PRED_LOAD_CONTINUE13]]: -; CHECK-NEXT: [[TMP35:%.*]] = phi <2 x i32> [ [[TMP30]], %[[PRED_LOAD_CONTINUE11]] ], [ [[TMP34]], %[[PRED_LOAD_IF12]] ] -; CHECK-NEXT: [[TMP36:%.*]] = add <2 x i32> [[TMP35]], splat (i32 10) +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE7]] +; CHECK: [[PRED_STORE_CONTINUE7]]: +; CHECK-NEXT: [[TMP36:%.*]] = add <2 x i32> [[TMP17]], splat (i32 10) ; CHECK-NEXT: [[TMP37:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP37]], label %[[PRED_STORE_IF14:.*]], label %[[PRED_STORE_CONTINUE15:.*]] -; CHECK: [[PRED_STORE_IF14]]: +; CHECK-NEXT: br i1 [[TMP37]], label %[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]] +; CHECK: [[PRED_STORE_IF8]]: ; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i32> [[TMP36]], i32 0 ; CHECK-NEXT: store i32 [[TMP39]], ptr [[TMP38]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE15]] -; CHECK: [[PRED_STORE_CONTINUE15]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE9]] +; CHECK: [[PRED_STORE_CONTINUE9]]: ; CHECK-NEXT: [[TMP40:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF16:.*]], label %[[PRED_STORE_CONTINUE17]] -; CHECK: [[PRED_STORE_IF16]]: +; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF10:.*]], label %[[PRED_STORE_CONTINUE11]] +; CHECK: [[PRED_STORE_IF10]]: ; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i32> [[TMP36]], i32 1 ; CHECK-NEXT: store i32 [[TMP42]], ptr [[TMP41]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE17]] -; CHECK: [[PRED_STORE_CONTINUE17]]: 
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE11]] +; CHECK: [[PRED_STORE_CONTINUE11]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP43]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -319,7 +289,7 @@ define void @test_noalias_store_via_runtime_checks(ptr %dst, ptr %dst.1, ptr %sr ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE30:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE28:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] @@ -327,79 +297,59 @@ define void @test_noalias_store_via_runtime_checks(ptr %dst, ptr %dst.1, ptr %sr ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP4]] ; CHECK-NEXT: store i32 10, ptr [[TMP10]], align 4, !alias.scope [[META25:![0-9]+]], !noalias [[META27:![0-9]+]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !alias.scope [[META30:![0-9]+]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP12]], i32 0 -; CHECK-NEXT: br label 
%[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP13]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_LOAD_IF19:.*]], label %[[PRED_LOAD_CONTINUE20:.*]] -; CHECK: [[PRED_LOAD_IF19]]: +; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP5]] ; CHECK-NEXT: store i32 10, ptr [[TMP16]], align 4, !alias.scope [[META25]], !noalias [[META27]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: [[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META30:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META30]] +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP14]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE20]] -; CHECK: [[PRED_LOAD_CONTINUE20]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP14]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF19]] ] -; CHECK-NEXT: [[TMP21:%.*]] = sub <2 x i32> [[TMP20]], splat (i32 5) +; CHECK-NEXT: [[TMP21:%.*]] = sub <2 x i32> [[TMP19]], splat (i32 5) ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] -; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF21:.*]], label 
%[[PRED_STORE_CONTINUE22:.*]] +; CHECK: [[PRED_STORE_IF21]]: ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i32> [[TMP21]], i32 0 ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4, !alias.scope [[META31:![0-9]+]], !noalias [[META32:![0-9]+]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] -; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] -; CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] +; CHECK: [[PRED_STORE_IF23]]: ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i32> [[TMP21]], i32 1 ; CHECK-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] -; CHECK: [[PRED_STORE_CONTINUE22]]: -; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_LOAD_IF23:.*]], label %[[PRED_LOAD_CONTINUE24:.*]] -; CHECK: [[PRED_LOAD_IF23]]: -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !alias.scope [[META30]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP30]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE24]] -; CHECK: [[PRED_LOAD_CONTINUE24]]: -; CHECK-NEXT: [[TMP32:%.*]] = phi <2 x i32> [ poison, %[[PRED_STORE_CONTINUE22]] ], [ [[TMP31]], %[[PRED_LOAD_IF23]] ] -; CHECK-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP33]], label %[[PRED_LOAD_IF25:.*]], label 
%[[PRED_LOAD_CONTINUE26:.*]] -; CHECK: [[PRED_LOAD_IF25]]: -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4, !alias.scope [[META30]] -; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i32> [[TMP32]], i32 [[TMP35]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE26]] -; CHECK: [[PRED_LOAD_CONTINUE26]]: -; CHECK-NEXT: [[TMP37:%.*]] = phi <2 x i32> [ [[TMP32]], %[[PRED_LOAD_CONTINUE24]] ], [ [[TMP36]], %[[PRED_LOAD_IF25]] ] -; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP37]], splat (i32 10) +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] +; CHECK: [[PRED_STORE_CONTINUE24]]: +; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP19]], splat (i32 10) ; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP39]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]] -; CHECK: [[PRED_STORE_IF27]]: +; CHECK-NEXT: br i1 [[TMP39]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] +; CHECK: [[PRED_STORE_IF25]]: ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0 ; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] -; CHECK: [[PRED_STORE_CONTINUE28]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] +; CHECK: [[PRED_STORE_CONTINUE26]]: ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30]] -; CHECK: [[PRED_STORE_IF29]]: +; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_IF27]]: ; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i32> [[TMP38]], 
i32 1 ; CHECK-NEXT: store i32 [[TMP44]], ptr [[TMP43]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE30]] -; CHECK: [[PRED_STORE_CONTINUE30]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_CONTINUE28]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP45:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP45]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] @@ -609,7 +559,7 @@ define void @test_memory_op_between_loads_no_alias_via_rt_checks(ptr %dst, ptr % ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE26:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE28:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] @@ -617,62 +567,56 @@ define void @test_memory_op_between_loads_no_alias_via_rt_checks(ptr %dst, ptr % ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP4]] ; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4, !alias.scope [[META48:![0-9]+]], !noalias [[META50:![0-9]+]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP12:%.*]] 
= load i32, ptr [[TMP11]], align 4, !alias.scope [[META53:![0-9]+]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP12]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP13]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_LOAD_IF19:.*]], label %[[PRED_LOAD_CONTINUE20:.*]] -; CHECK: [[PRED_LOAD_IF19]]: +; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP5]] ; CHECK-NEXT: store i32 0, ptr [[TMP16]], align 4, !alias.scope [[META48]], !noalias [[META50]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: [[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META53:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META53]] +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP14]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE20]] -; CHECK: [[PRED_LOAD_CONTINUE20]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP14]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF19]] ] -; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP20]], splat (i32 10) +; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP19]], splat (i32 10) ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], 
label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] -; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] +; CHECK: [[PRED_STORE_IF21]]: ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i32> [[TMP21]], i32 0 ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4, !alias.scope [[META54:![0-9]+]], !noalias [[META55:![0-9]+]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] -; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] -; CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] +; CHECK: [[PRED_STORE_IF23]]: ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i32> [[TMP21]], i32 1 ; CHECK-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4, !alias.scope [[META54]], !noalias [[META55]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] -; CHECK: [[PRED_STORE_CONTINUE22]]: -; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] -; CHECK: [[PRED_STORE_IF23]]: -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP29]], align 4, !alias.scope [[META53]] -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store i32 [[TMP34]], ptr [[TMP31]], align 4, !alias.scope [[META54]], !noalias [[META55]] ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] ; CHECK: [[PRED_STORE_CONTINUE24]]: 
-; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26]] +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 +; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] ; CHECK: [[PRED_STORE_IF25]]: -; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP33]], align 4, !alias.scope [[META53]] -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] -; CHECK-NEXT: store i32 [[TMP30]], ptr [[TMP35]], align 4, !alias.scope [[META54]], !noalias [[META55]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] +; CHECK-NEXT: store i32 [[TMP11]], ptr [[TMP31]], align 4, !alias.scope [[META54]], !noalias [[META55]] ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] ; CHECK: [[PRED_STORE_CONTINUE26]]: +; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 +; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_IF27]]: +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] +; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP35]], align 4, !alias.scope [[META54]], !noalias [[META55]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_CONTINUE28]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP56:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll index e4c893f5269bb..f6dd8564c001b 100644 --- a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll +++ 
b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll @@ -21,51 +21,20 @@ define void @test(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], i32 1 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP24]], align 4, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP34:%.*]] = xor <2 x i1> [[TMP15]], splat (i1 true) -; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x i1> [[TMP34]], i32 0 -; CHECK-NEXT: br i1 [[TMP35]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP19]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP34]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF6:.*]], 
label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP36:%.*]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP23]], %[[PRED_LOAD_IF6]] ] +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP7]], i32 1 ; CHECK-NEXT: [[TMP25:%.*]] = add <2 x i32> [[TMP36]], splat (i32 10) -; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 -; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP26]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP33:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP31]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP33]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP33]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP29]], <2 x i32> [[TMP25]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP36]], <2 x i32> [[TMP25]] ; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds 
i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP37]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 @@ -450,7 +419,7 @@ exit: ret void } -; Positive test: Same address with different alignments - should hoist with minimum alignment +; Make sure the minimum alignment is used when loads have different alignments. define void @different_alignments_same_address(ptr %dst, ptr %src, ptr %cond) { ; CHECK-LABEL: define void @different_alignments_same_address( ; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], ptr [[COND:%.*]]) { @@ -471,53 +440,22 @@ define void @different_alignments_same_address(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], i32 1 ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META36:![0-9]+]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP16:%.*]] = xor <2 x i1> [[TMP15]], splat (i1 true) -; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP16]], i32 0 -; CHECK-NEXT: br i1 [[TMP17]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; 
CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope [[META39:![0-9]+]] +; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 2, !alias.scope [[META39:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP9]], align 2, !alias.scope [[META39]] ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP35:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP19]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP16]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> [[TMP35]], i32 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i32> [ [[TMP35]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP23]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP25:%.*]] = add <2 x i32> [[TMP24]], splat (i32 10) -; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 -; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP8]], align 2, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP26]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP33:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP31]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; 
CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP9]], align 2, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP33]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP33]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP29]], <2 x i32> [[TMP25]] -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP34]], align 4, !alias.scope [[META41:![0-9]+]], !noalias [[META43:![0-9]+]] +; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP7]], i32 1 +; CHECK-NEXT: [[TMP26:%.*]] = add <2 x i32> [[TMP25]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> [[TMP26]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP35]], align 4, !alias.scope [[META41:![0-9]+]], !noalias [[META43:![0-9]+]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] @@ -642,50 +580,19 @@ define void @duplicate_gep(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: 
[[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META46:![0-9]+]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META49:![0-9]+]] -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP12]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META49]] -; CHECK-NEXT: [[TMP17:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP16]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP18:%.*]] = phi <2 x i32> [ [[TMP13]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP17]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP19:%.*]] = add <2 x i32> [[TMP18]], splat (i32 10) -; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !alias.scope [[META49]] +; CHECK-NEXT: 
[[TMP22:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META49:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META49]] ; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP23]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4, !alias.scope [[META49]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP24]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP29:%.*]] = insertelement <2 x i32> [[TMP23]], i32 [[TMP8]], i32 1 +; CHECK-NEXT: [[TMP19:%.*]] = add <2 x i32> [[TMP29]], splat (i32 10) ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP7]], <2 x i32> [[TMP29]], <2 x i32> [[TMP19]] ; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP30]], align 4, !alias.scope [[META51:![0-9]+]], !noalias [[META53:![0-9]+]] @@ -752,50 +659,19 @@ define void @non_unit_stride_i64(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: 
[[TMP6:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP6]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META56:![0-9]+]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP9]], splat (i1 true) -; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP10]], i32 0 -; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP6]] -; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META59:![0-9]+]] -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP13]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP14]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP10]], i32 1 -; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP7]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META59]] -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP15]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP15]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP20]], splat (i32 10) -; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP9]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; 
CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP6]] -; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !alias.scope [[META59]] +; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META59:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META59]] ; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> poison, i32 [[TMP24]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP26:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP25]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i1> [[TMP9]], i32 1 -; CHECK-NEXT: br i1 [[TMP27]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP7]] -; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4, !alias.scope [[META59]] -; CHECK-NEXT: [[TMP30:%.*]] = insertelement <2 x i32> [[TMP26]], i32 [[TMP29]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP31:%.*]] = phi <2 x i32> [ [[TMP26]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP30]], %[[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> [[TMP25]], i32 [[TMP10]], i32 1 +; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP31]], splat (i32 10) ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP9]], <2 x i32> [[TMP31]], <2 x i32> [[TMP21]] ; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP6]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP32]], align 4, !alias.scope [[META61:![0-9]+]], !noalias [[META63:![0-9]+]] @@ -1045,55 +921,15 @@ define void @hoist_predicated_load_with_chained_geps1(ptr %dst, ptr %src, i1 %co ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label 
%[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE8:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0 -; CHECK-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2, !alias.scope [[META68:![0-9]+]] -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i16> poison, i16 [[TMP6]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x i16> [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] -; CHECK: [[PRED_LOAD_IF3]]: -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i16, ptr [[TMP11]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i16> [[TMP8]], i16 [[TMP12]], i32 1 -; 
CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE4]] -; CHECK: [[PRED_LOAD_CONTINUE4]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i16> [ [[TMP8]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP13]], %[[PRED_LOAD_IF3]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] -; CHECK: [[PRED_LOAD_IF5]]: -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP15]], i64 8 -; CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr [[TMP16]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x i16> poison, i16 [[TMP17]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE6]] -; CHECK: [[PRED_LOAD_CONTINUE6]]: -; CHECK-NEXT: [[TMP19:%.*]] = phi <2 x i16> [ poison, %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP18]], %[[PRED_LOAD_IF5]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF7:.*]], label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_IF7]]: ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 8 -; CHECK-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i16> [[TMP19]], i16 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_CONTINUE8]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i16> [ [[TMP19]], %[[PRED_LOAD_CONTINUE6]] ], [ [[TMP23]], %[[PRED_LOAD_IF7]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[COND]], <2 x i16> [[TMP24]], <2 x i16> [[TMP14]] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i16> [[PREDPHI]], i32 1 -; CHECK-NEXT: store i16 [[TMP25]], ptr [[DST]], align 2, !alias.scope [[META71:![0-9]+]], !noalias [[META68]] +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META68:![0-9]+]] +; CHECK-NEXT: store i16 [[TMP4]], ptr [[DST]], align 2, !alias.scope [[META71:![0-9]+]], !noalias [[META68]] ; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP73:![0-9]+]] @@ -1145,55 +981,15 @@ define void @hoist_predicated_load_with_chained_geps2(ptr %dst, ptr %src, i1 %co ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE8:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[TMP4]], i32 1 -; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0 -; CHECK-NEXT: br i1 [[TMP7]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 -; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[TMP8]], align 2, !alias.scope [[META75:![0-9]+]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i16> poison, i16 [[TMP9]], i32 0 -; CHECK-NEXT: br label 
%[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP11:%.*]] = phi <2 x i16> [ poison, %[[VECTOR_BODY]] ], [ [[TMP10]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1 -; CHECK-NEXT: br i1 [[TMP12]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] -; CHECK: [[PRED_LOAD_IF3]]: -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i16, ptr [[TMP13]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i16> [[TMP11]], i16 [[TMP14]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE4]] -; CHECK: [[PRED_LOAD_CONTINUE4]]: -; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i16> [ [[TMP11]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF3]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] -; CHECK: [[PRED_LOAD_IF5]]: -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 -; CHECK-NEXT: [[TMP18:%.*]] = load i16, ptr [[TMP17]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i16> poison, i16 [[TMP18]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE6]] -; CHECK: [[PRED_LOAD_CONTINUE6]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i16> [ poison, %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP19]], %[[PRED_LOAD_IF5]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF7:.*]], label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_IF7]]: ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i16> [[TMP20]], i16 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_CONTINUE8]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i16> [ [[TMP20]], %[[PRED_LOAD_CONTINUE6]] ], [ [[TMP23]], %[[PRED_LOAD_IF7]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[COND]], <2 
x i16> [[TMP24]], <2 x i16> [[TMP16]] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i16> [[PREDPHI]], i32 1 -; CHECK-NEXT: store i16 [[TMP25]], ptr [[DST]], align 2, !alias.scope [[META78:![0-9]+]], !noalias [[META75]] +; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META75:![0-9]+]] +; CHECK-NEXT: store i16 [[TMP5]], ptr [[DST]], align 2, !alias.scope [[META78:![0-9]+]], !noalias [[META75]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP80:![0-9]+]] @@ -1262,7 +1058,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 ; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] ; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62:![0-9]+]] +; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x i32> poison, i32 [[TMP17]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] ; CHECK: [[PRED_LOAD_CONTINUE]]: @@ -1270,7 +1066,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 ; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] ; CHECK: [[PRED_LOAD_IF2]]: -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP21]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE3]] ; CHECK: [[PRED_LOAD_CONTINUE3]]: @@ -1280,7 +1076,7 @@ define void 
@hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i1> [[TMP25]], i32 0 ; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_LOAD_IF4:.*]], label %[[PRED_LOAD_CONTINUE5:.*]] ; CHECK: [[PRED_LOAD_IF4]]: -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> poison, i32 [[TMP27]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE5]] ; CHECK: [[PRED_LOAD_CONTINUE5]]: @@ -1288,7 +1084,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP25]], i32 1 ; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] ; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP32:%.*]] = insertelement <2 x i32> [[TMP29]], i32 [[TMP31]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] ; CHECK: [[PRED_LOAD_CONTINUE7]]: @@ -1297,7 +1093,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x i1> [[TMP11]], i32 0 ; CHECK-NEXT: br i1 [[TMP35]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] ; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP37:%.*]] = insertelement <2 x i32> poison, i32 [[TMP36]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] ; CHECK: [[PRED_LOAD_CONTINUE9]]: @@ -1305,7 +1101,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: 
[[TMP39:%.*]] = extractelement <2 x i1> [[TMP11]], i32 1 ; CHECK-NEXT: br i1 [[TMP39]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] ; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP41:%.*]] = insertelement <2 x i32> [[TMP38]], i32 [[TMP40]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] ; CHECK: [[PRED_LOAD_CONTINUE11]]: @@ -1313,10 +1109,10 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP25]], <2 x i32> [[TMP34]], <2 x i32> [[TMP24]] ; CHECK-NEXT: [[PREDPHI16:%.*]] = select <2 x i1> [[TMP11]], <2 x i32> [[TMP42]], <2 x i32> [[PREDPHI]] ; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store <2 x i32> [[PREDPHI16]], ptr [[TMP43]], align 4, !alias.scope [[META65:![0-9]+]], !noalias [[META62]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI16]], ptr [[TMP43]], align 4, !alias.scope [[META85:![0-9]+]], !noalias [[META82]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 -; CHECK-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP67:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP87:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[EXIT:label %.*]] ; CHECK: [[SCALAR_PH]]: diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion.ll b/llvm/test/Transforms/LoopVectorize/if-conversion.ll index a88a9b1466149..350c267445e54 100644 --- a/llvm/test/Transforms/LoopVectorize/if-conversion.ll +++ b/llvm/test/Transforms/LoopVectorize/if-conversion.ll @@ -237,22 +237,36 @@ for.end: ; preds = %for.inc, %entry ; Handle PHI with single incoming value having a full mask. 
; PR34523 -; NOTE: Changing PHI inputs from undef to poison leads to change in -; behaviour of the test. Left as undef for now. -define void @PR34523() { -; CHECK-LABEL: define void @PR34523() { -; CHECK-NEXT: [[BB1:.*:]] -; CHECK-NEXT: br i1 true, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +define void @PR34523(ptr %p, i16 %val) { +; CHECK-LABEL: define void @PR34523( +; CHECK-SAME: ptr [[P:%.*]], i16 [[VAL:%.*]]) { +; CHECK-NEXT: [[BB1:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[VAL]], 1 +; CHECK-NEXT: [[SMAX:%.*]] = call i16 @llvm.smax.i16(i16 [[TMP0]], i16 2) +; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[VAL]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = add i16 [[SMAX]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 1 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i16 [[TMP2]], 3 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[TMP4]], 131068 +; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16 +; CHECK-NEXT: [[TMP5:%.*]] = add i16 [[VAL]], [[DOTCAST]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: br i1 poison, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br i1 poison, label %[[BB5:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP4]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[BB5:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[TMP5]], 
%[[MIDDLE_BLOCK]] ], [ [[VAL]], %[[BB1]] ] ; CHECK-NEXT: br label %[[BB2:.*]] ; CHECK: [[BB2]]: -; CHECK-NEXT: [[I:%.*]] = phi i16 [ undef, %[[SCALAR_PH]] ], [ [[_TMP2:%.*]], %[[BB4:.*]] ] +; CHECK-NEXT: [[I:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[_TMP2:%.*]], %[[BB4:.*]] ] ; CHECK-NEXT: br label %[[BB3:.*]] ; CHECK: [[BB3]]: ; CHECK-NEXT: br label %[[BB4]] @@ -267,11 +281,11 @@ bb1: br label %bb2 bb2: ; preds = %bb4, %bb1 - %i = phi i16 [ undef, %bb1 ], [ %_tmp2, %bb4 ] + %i = phi i16 [ %val, %bb1 ], [ %_tmp2, %bb4 ] br label %bb3 bb3: ; preds = %bb2 - %_tmp1 = phi ptr [ undef, %bb2 ] + %_tmp1 = phi ptr [ %p, %bb2 ] br label %bb4 bb4: ; preds = %bb3 diff --git a/llvm/test/Transforms/LoopVectorize/if-reduction.ll b/llvm/test/Transforms/LoopVectorize/if-reduction.ll index 73a2203c3115b..eab9df558f608 100644 --- a/llvm/test/Transforms/LoopVectorize/if-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/if-reduction.ll @@ -1648,8 +1648,8 @@ define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 -3 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 -3 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <4 x float> [[REVERSE]], zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll 
b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll index 34873319176d1..df8ade647d968 100644 --- a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll +++ b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll @@ -58,7 +58,7 @@ thread-pre-split.loopexit: ; preds = %11, %.thread-pre-sp br i1 %arg, label %11, label %22 ;